diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td index 1ae1feed177ae..d93ffb70881bd 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td @@ -76,10 +76,10 @@ def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface For the case of dynamic memrefs or pointer, the shape and layout information of the memory region should be explicitly passed via `shape` and `strides` parameters. - - `offsets`: index values represents offsets from the "source" at the each dimension + - `offsets`: [optional] index values represents offsets from the "source" at the each dimension at which the subview of the target memory will be created. It is encoded via "offsets" and "const_offsets", such that it can accept various forms, such as, - operands (e.g., [%c0, %c]) and attributes (e.g., [2, 4]). + operands (e.g., [%c0, %c]) and attributes (e.g., [2, 4]). Offsets is optional and may be set at load_nd, store_nd, and prefetch_nd. - `shape`: the shape information of the memory region pointed by the "source". It is typically encoded via the MemRefType of the source, e.g., memref<4096x4096xf16>. @@ -236,7 +236,7 @@ def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface return static_cast(MemorySpace::Global); } - xegpu::DistributeLayoutAttr getLayoutAttr() { + xegpu::DistributeLayoutAttr getDescLayoutAttr() { return dyn_cast_if_present(getType().getLayout()); } @@ -253,12 +253,32 @@ def XeGPU_PrefetchNdOp : XeGPU_Op<"prefetch_nd", []> { It issues an instruction to prefetch a block of data from continuous memory regions to each level of the cache based on their cache policy. - Example: + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + - `TensorDesc`: A tensor descriptor specifying the base nd-region of + memory and tensor tile to be prefetched. + + - `offsets`: [optional] index values representing per-dimension offsets from the + base position encoded in `TensorDesc`. It is encoded via "offsets" + and "const_offsets". + + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] An cache-hint attribute + indicating the desired behavior at the L1, L2, and L3 cache levels. + + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand. + Only valid at the workgroup and subgroup levels. 
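+
+    Since the layout anchor is also valid at the subgroup level, a minimal
+    subgroup-level sketch follows (`%tdesc` is assumed to be created earlier;
+    the tile shape and `inst_data` values are illustrative only, not values
+    required by the op):
+    ```mlir
+      xegpu.prefetch_nd %tdesc {layout = #xegpu.layout<inst_data = [8, 16]>}
+        : !xegpu.tensor_desc<32x64xf16>
+    ```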
+ + Example (Workgroup level): ```mlir - xegpu.prefetch_nd %tdesc {l1_hint = #xegpu.cache_hint, + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + xegpu.prefetch_nd %tdesc[%c0, %c1] {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} - : !xegpu.tensor_desc<8x16xf16> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout } + : !xegpu.tensor_desc<32x256xf16> ``` }]; @@ -268,7 +288,8 @@ def XeGPU_PrefetchNdOp : XeGPU_Op<"prefetch_nd", []> { OptionalAttr: $const_offsets, OptionalAttr: $l1_hint, OptionalAttr: $l2_hint, - OptionalAttr: $l3_hint); + OptionalAttr: $l3_hint, + OptionalAttr:$layout); let extraClassDeclaration = extraBaseClassDeclaration # [{ xegpu::TensorDescType getTensorDescType() { @@ -283,7 +304,7 @@ def XeGPU_PrefetchNdOp : XeGPU_Op<"prefetch_nd", []> { return getMixedValues(statics, dynamics, getContext()); } - xegpu::DistributeLayoutAttr getLayoutAttr() { + xegpu::DistributeLayoutAttr getDescLayoutAttr() { return dyn_cast_if_present(getTensorDescType().getLayout()); } @@ -325,25 +346,48 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [ a block of data from memory to register. It takes a set of optional cache hints for each level of cache, L1, L2 and L3. If hardware does not have a correspoding cache, Corresponding cache hint attribute will be masked. - VNNI transformation is an hardware feature for Intel GPU, which is used to - do data packing during the load for B operand of matrix operation, if - the bit width of the data type is less then 32 bits, e.g., fp16. And - transpose is another Intel hardware feature, which will do transpose - operation when loading the data if the bit width of the data type is - fp32 or fp64. It implies that vnni and transpose cannot exit at the - same time. It is only available to 1D or 2D blocked tensor_desc. - In SIMT mode, result vector represents the data to be loaded by each work-item. + On Intel GPUs, hardware-supported packing rearranges data elements during + the load of the B operand when the element bit-width is less than 32 bits + (for example, fp16). The transpose feature reorders data during the load + when the element type is fp32 or fp64. These two features are mutually + exclusive and shall not be enabled simultaneously. Both features support only + 2D blocked tensor_desc. + + At lane level, result vector represents the data to be loaded by each lane. + + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + + - `TensorDesc`: A tensor descriptor specifying the base nd-region of memory + and the tensor tile to be loaded. - Example 1: + - `offsets`: Index values representing per-dimension offsets from the base position + encoded in `TensorDesc`. They are encoded via `offsets` and `const_offsets`. + + - `packed`: [optional] A unit attribute indicating that packing is applied + during the load when supported by the hardware. Only valid at lane level. + + - `transpose`: [optional] An attribute describing a hardware-supported transpose + to be applied during the load. Only valid at Lane level. + + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] Cache-hint attributes indicating the + desired behavior at the L1, L2, and L3 cache levels. + + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand as well as the result of the load (they are identical). Only valid at workgroup and subgroup levels. 
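+
+    Because the layout anchor is also accepted at the subgroup level, a minimal
+    subgroup-level sketch is shown first (`%tdesc` is assumed to be created
+    earlier; the shapes and `inst_data` values are illustrative only):
+    ```mlir
+      %val = xegpu.load_nd %tdesc {layout = #xegpu.layout<inst_data = [8, 16]>}
+        : !xegpu.tensor_desc<32x32xf16> -> vector<32x32xf16>
+    ```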
+ + Example 1 (Workgroup level): ```mlir xegpu.load_nd %1 {transpose = [1, 0], l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} - : !xegpu.tensor_desc<8x16xf32> -> vector<16x8xf32> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout} + : !xegpu.tensor_desc<32x256xf32> -> vector<32x256xf32> ``` - Example 2 (SIMT mode): + Example 2 (lane level): ```mlir xegpu.load_nd %1 {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> @@ -360,7 +404,8 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [ OptionalAttr: $transpose, OptionalAttr: $l1_hint, OptionalAttr: $l2_hint, - OptionalAttr: $l3_hint); + OptionalAttr: $l3_hint, + OptionalAttr:$layout); let results = (outs XeGPU_ValueType: $value); @@ -381,7 +426,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [ return getMixedValues(statics, dynamics, getContext()); } - xegpu::DistributeLayoutAttr getLayoutAttr() { + xegpu::DistributeLayoutAttr getDescLayoutAttr() { return dyn_cast_if_present(getTensorDescType().getLayout()); } @@ -389,7 +434,6 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [ return getTensorDescType().getShape(); } - }]; let assemblyFormat = [{ @@ -428,16 +472,36 @@ def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [ Corresponding cache hint attribute will be masked. It is only available to 1D or 2D blocked tensor_desc. - In SIMT mode, the input vector represents the data to be stored by each work-item. + At lane level, the input vector represents the data to be stored by each lane. + + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + + - `value`: A vector value representing the tensor tile to be stored. + + - `TensorDesc`: A tensor descriptor specifying the base nd-region of memory and + the tensor tile to be stored. + + - `offsets`: Index values representing per-dimension offsets from the base position + encoded in `TensorDesc`. They are encoded via `offsets` and `const_offsets`. - Example 1: + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] Cache-hint attributes indicating the + desired behavior at the L1, L2, and L3 cache levels. + + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand as well as + the value to be stored (they are identical). Only valid at workgroup and subgroup levels. 
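+
+    A corresponding subgroup-level sketch (with `%val` and `%tdesc` assumed to
+    be defined earlier and illustrative `inst_data` values):
+    ```mlir
+      xegpu.store_nd %val, %tdesc {layout = #xegpu.layout<inst_data = [8, 16]>}
+        : vector<32x32xf16>, !xegpu.tensor_desc<32x32xf16>
+    ```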
+ + Example 1 (Workgroup level): ```mlir xegpu.store_nd %3, %2 {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} - : vector<8x16xf16>, !xegpu.tensor_desc<8x16xf16> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout} + : vector<32x256xf16>, !xegpu.tensor_desc<32x256xf16> ``` - Example 2 (SIMT mode): + Example 2 (lane level): ```mlir xegpu.store_nd %3, %2 {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, @@ -454,7 +518,8 @@ def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [ OptionalAttr: $const_offsets, OptionalAttr: $l1_hint, OptionalAttr: $l2_hint, - OptionalAttr: $l3_hint); + OptionalAttr: $l3_hint, + OptionalAttr:$layout); let extraClassDeclaration = extraBaseClassDeclaration # [{ VectorType getValueType() { @@ -473,7 +538,7 @@ def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [ return getMixedValues(statics, dynamics, getContext()); } - xegpu::DistributeLayoutAttr getLayoutAttr() { + xegpu::DistributeLayoutAttr getDescLayoutAttr() { return dyn_cast_if_present(getTensorDescType().getLayout()); } @@ -561,21 +626,22 @@ def XeGPU_CreateDescOp: XeGPU_Op<"create_tdesc", [Pure, ViewLikeOpInterface]> { "create_tdesc" is similar to "create_nd_tdesc" in terms that it creates a Tensor Descriptor (TensorDescType) for a memory region. While "create_nd_tdesc" is for creating continuous subviews, "create_tdesc" is for creating non-continuous - (scattered) subviews, allowing each work-item in a subgroup specifying their own offset. + (scattered) subviews, allowing each lane in a subgroup specifying their own offset. It accepts the following parameters: Arguments: + - `source`: a 1D memref or pointer (i64, i32, ui64, ui32) represents the flattened memory object. + - `offsets`: a vector containing offsets of each access point. Its size is fixed to the hardware supportted subgroup size, e.g., 16 on PVC, - implying each element in the vector corresponds to a work-item (SIMT lane) - in the subgroup. + implying each element in the vector corresponds to a SIMT lane in the subgroup. Results: - `res`: scattered tensor descriptor - The first dimension of the result TensorDesc corresponds to work-items, so it should + The first dimension of the result TensorDesc corresponds to lanes, so it should match the dimension of offsets. It may also has a second dimension corresponding to the chunk_size if the chunk size is larger than 1. @@ -664,27 +730,39 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> { As compared to prefetch_nd, which works on non-scattered TensorDesc, it works on scattered TensorDesc instead. + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + Arguments: + - `source`: represents the memory region to be loaded from, which can be either a tensor_desc or a 1D memref or pointer (ui64, ui32, i64 or i32). In case of tensor_desc, offsets come from the producer create_tdesc op. - tensor_desc cannot be used in SIMT mode. + tensor_desc cannot be used at lane level. + - `offsets`: represents offsets from source. required if `source` in not a TensorDescType. offsets is a vector of `index` type and vector length is either the subgroup size - or 1 in SIMT mode. scalar offset is also valid for SIMT mode. - - `l1_hint`, `l2_hint`, `l3_hint`: are optional cache hints for each level of cache. - - `offset_align_byte`: required if `source` is a pointer. If `source` is not a pointer, + or 1 at lane level. scalar offset is also valid for lane level. 
+ + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] cache hints for each level of cache. + + - `offset_align_byte`: [optional] required if `source` is a pointer. If `source` is not a pointer, it is not allowed. Represents the alignment in bytes of each offset in offsets. - Example 1: + - `layout`: [optional] Describes the expected layout of the `tensor_desc` or `offsets` + operand. Only valid at workgroup and subgroup levels. + + Example 1 (Workgroup level): ```mlir xegpu.prefetch %tdesc {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} - : !xegpu.tensor_desc<16xf16> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout + } + : !xegpu.tensor_desc<256xf16> ``` - Example 2: + Example 2 (lane level): A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc. It combines "create scattered TensorTdesc" and "prefetch with scattered TensorTdesc". The source operand could be a raw pointer (ui64, ui32, i64, i32). @@ -698,8 +776,8 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> { : memref<1024xf32>, vector<4xindex> ``` - Example 3 (SIMT mode): - SIMT mode only accepts the offsets variant. + Example 3 (lane level): + lane level only accepts the offsets variant. ```mlir xegpu.prefetch %0[%1] {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, @@ -707,8 +785,8 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> { : memref<256xf32>, vector<1xindex> ``` - Example 4 (SIMT mode): - SIMT mode only accepts the offsets variant. + Example 4 (lane level): + lane level only accepts the offsets variant. ```mlir xegpu.prefetch %0[%1] {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, @@ -724,7 +802,8 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> { OptionalAttr:$l1_hint, OptionalAttr:$l2_hint, OptionalAttr:$l3_hint, - OptionalAttr:$offset_align_byte); + OptionalAttr:$offset_align_byte, + OptionalAttr:$layout); let extraClassDeclaration = extraBaseClassDeclaration # [{ Type getSourceType() { @@ -764,54 +843,67 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> { def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> { let summary = "load a set of scattered data points from memory."; - let description = [{ It (aka. load) load data per each work-item. The output + let description = [{ It (aka. load) load data per each lane. The output describes the data being loaded at the subgroup level, so its size is - consistent with the number of work-items in a subgroup. When the chunk size + consistent with the number of lanes in a subgroup. When the chunk size is larger than 2, the output vector is a 2D vector, with dim-0 correspoding - to work-items, and dim-1 corresponding to the chunk size loaded by each work-item. + to lanes, and dim-1 corresponding to the chunk size loaded by each lane. The mask operand masks out memory access so that it is safe to pass out-of-boundary - addresses/offsets as long as they are masked. It applies to slots of SIMD lanes. + addresses/offsets as long as they are masked. Each mask element applies to one lane. + + In lane level, the result is a 1D vector that represents the data to be loaded by + each lane. If size is not 1, size should be equal to the chunk size. - In SIMT mode, the result is a 1D vector that represents the data to be loaded by - each work-item. If size is not 1, size should be equal to the chunk size, + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. 
Arguments: + - `source`: represents the memory region to be loaded from, which can be either a tensor_desc or a 1D memref or pointer (ui64, ui32, i64 or i32). In case of tensor_desc, offsets come from the producer create_tdesc op. - tensor_desc cannot be used in SIMT mode. + tensor_desc cannot be used at lane level. + - `offsets`: represents offsets from source. required if `source` in not a TensorDescType. offsets is a vector of `index` type and vector length is either the subgroup size - or 1 in SIMT mode. scalar offset is also valid for SIMT mode. + or 1 at lane level. scalar offset is also valid for lane level. + - `mask`: is a vector of `i1` type, which is used to mask out the memory access. - mask is a vector of size equal to the subgroup size, or 1 in SIMT mode. - scalar mask is also valid for SIMT mode. - - `chunk_size`: (optional) represents contiguous number of elements to load from per work item. - - `l1_hint`, `l2_hint`, `l3_hint`: are optional cache hints for each level of cache. + mask is a vector of size equal to the subgroup size, or 1 at lane level. + scalar mask is also valid for lane level. + + - `chunk_size`: [optional] represents contiguous number of elements to load from per work item. + + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] cache hints for each level of cache. + + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand or the result + of load. Only valid at workgroup and subgroup levels. Results: - `res`: represents loaded data - Example 1: + Example 1 (Workgroup level): ```mlir %2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint}> - : !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr>, - vector<16xi1> -> vector<16xf32> + l3_hint = #xegpu.cache_hint}, + layout = #xegpu.layout> + : !xegpu.tensor_desc<256xf32, #xegpu.scatter_tdesc_attr>, + vector<256xi1> -> vector<256xf32> ``` - Example 2: + Example 2 (Subgroup level): ```mlir %2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint}> + l3_hint = #xegpu.cache_hint}, + layout = #xegpu.layout> : !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr>, vector<16xi1> -> vector<16x8xf32> ``` - Example 3: + Example 3 (Subgroup level): A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc. It combines "create scattered TensorTdesc" and "load with scattered TensorTdesc". The source operand could be a raw pointer (ui64, ui32, i64, i32). Please refer to create_tdesc @@ -822,12 +914,13 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> { %mask = vector.constant_mask [16]: vector<16xi1> %val = xegpu.load %a[%offsets], %mask {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout} : memref<1024xf32>, vector<16xi1>, vector<16xindex> -> vector<16xf32> ``` - Example 4 (SIMT mode): - SIMT mode only accepts the offsets variant. chunk_size can be inferred from result + Example 4 (lane level): + lane level only accepts the offsets variant. chunk_size can be inferred from result type. In this example, chunk_size is 8. ```mlir %2 = xegpu.load %1[%2], %0 <{l1_hint = #xegpu.cache_hint, @@ -919,41 +1012,56 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> { has transpose effect, which is similar to `load_gather`. 
Therefore, a transpose attribute is introduced on purpose, making sure users are aware of this implicit transformation. - In SIMT mode, the result is a 1D vector that represents the data to be stored by - each work-item. If size is not 1, size should be equal to the chunk size. + In lane level, the result is a 1D vector that represents the data to be stored by + each lane. If size is not 1, size should be equal to the chunk size. + + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. Arguments: + - `value`: represents the data to be stored. + - `dest`: represents the memory region to be stored to, which can be either a tensor_desc or a 1D memref or pointer (ui64, ui32, i64 or i32). In case of tensor_desc, offsets come from the producer create_tdesc op. - tensor_desc cannot be used in SIMT mode. + tensor_desc cannot be used at lane level. + - `offsets`: represents offsets from dest. required if `source` in not a TensorDescType. offsets is a vector of `index` type and vector length is either the subgroup size - or 1 in SIMT mode. scalar offset is also valid for SIMT mode. + or 1 at lane level. scalar offset is also valid for lane level. + - `mask`: is a vector of `i1` type, which is used to mask out the memory access. - mask is a vector of size equal to the subgroup size, or 1 in SIMT mode. - scalar mask is also valid for SIMT mode. - - `chunk_size`: (optional) represents contiguous number of elements to store to per work item. - - `l1_hint`, `l2_hint`, `l3_hint`: are optional cache hints for each level of cache. + mask is a vector of size equal to the subgroup size, or 1 at lane level. + scalar mask is also valid for lane level. + + - `chunk_size`: [optional] represents contiguous number of elements to store to per work item. + + - `l1_hint`, `l2_hint`, `l3_hint`: [optional] cache hints for each level of cache. + + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand or the value + to be stored. Only valid at workgroup and subgroup levels. - Example 1: + + Example 1 (Workgroup level): ```mlir xegpu.store %0, %1, %2 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint}> - : vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.scattered_tdesc_attr<>>, vector<16xi1> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout}> + : vector<256xf32>, !xegpu.tensor_desc<256xf32, #xegpu.scattered_tdesc_attr<>>, vector<256xi1> ``` - Example 2: + Example 2 (Subgroup level): ```mlir xegpu.store %0, %1, %2 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint}> + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout}> : vector<16x8xf32>, !xegpu.tensor_desc<16x8xf32, #xegpu.scattered_tdesc_attr>, vector<16xi1> ``` - Example 3: + Example 3 (Subgroup level): A variant accepts memref as base pointer and an offset instead of scattered TensorTdesc. It combines "create scattered TensorTdesc" and "store with scattered TensorTdesc". The dest operand could be a raw pointer (uint64_t). 
@@ -965,12 +1073,13 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> { %mask = vector.constant_mask [16]: vector<16xi1> xegpu.store %val, %a[%offsets], %mask {l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, - l3_hint = #xegpu.cache_hint} + l3_hint = #xegpu.cache_hint, + layout = #xegpu.layout} : memref<1024xf32>, vector<16xi1>, vector<16xindex> -> vector<16xf32> ``` - Example 4 (SIMT mode): - SIMT mode only accepts the offsets variant. chunk_size can be inferred from value + Example 4 (Lane level): + Lane level IR only accepts the offsets variant. chunk_size can be inferred from value type. In this example, chunk_size is 8. ```mlir xegpu.store %0, %1[%2], %3 <{l1_hint = #xegpu.cache_hint, @@ -1061,8 +1170,8 @@ def XeGPU_UpdateOffsetOp: XeGPU_Op<"update_offset", the current position in the number of elements. However, `update_nd_offset` is to update the start point of a 2D block, so its offset constains two elements representing the shift in each dimension. `update_offset` is to - update the offset per work-item, so its offsets contains values representing - shifts for each work-item. + update the offset per lane, so its offsets contains values representing + shifts for each lane. Example: ```mlir @@ -1112,28 +1221,57 @@ def XeGPU_DpasOp : XeGPU_Op<"dpas", [Pure, AllElementTypesMatch<["lhs", "rhs"]>] size, B of `kxn` size, and accumulate on matrix C of `mxn` to the same size matrix , `m=8`, `n=16` and `k=8 * 32/bit_width_of_elem_type`. So for fp16 data type, the matrices are `A: vector<8x16xf16>`, `B: vector<16x16xf16>`, - and `C/D: vector<8x16xf32>`. Besides the matrix size requirements, DPAS - also requires A and B to be loaded with the required data layout. Specially, - VNNI layout is required for B operand. It is achieved via adding `packed` - attribute to the `load_nd` operator. Due to the VNNI transformation, B operands - can be represented as a 3D vector, with the last dimension representing the VNNI - factor, which is computed as `32/bit_width_of_elem_type`. Thus, `B: vector<16x16xf16>` - can be represented as `B: vector<8x16x2xf16>`. - - In SIMT code, each work-item from a subgroup holds a data fragment for A, B, C and the result, + and `C/D: vector<8x16xf32>`. + + In lane level code, each lane from a subgroup holds a data fragment for A, B, C and the result, which are represented as 1D vectors. Please refer to [OpenCL Intel extentions] (https://registry.khronos.org/OpenCL/extensions/intel/cl_intel_subgroup_matrix_multiply_accumulate.html) for more details about the fragment distribution. - Note: on PVC, the hardware can perform load with VNNI transformation when data - element type is 16-bit or lower precision, taking 2 or 4 elements from - the first dimension and inserted into the newly added innermost dimension. + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + + - `lhs`: A vector value representing the left-hand-side matrix tile (A) participating in the + matrix multiply. + + - `rhs`: A vector value representing the right-hand-side matrix tile (B). + + - `acc`: [optional] A vector value representing the accumulator matrix tile (C). When present, the + result is computed as `lhs * rhs + acc`; otherwise, the accumulator is implicitly assumed to be zero. 
+ + - `layout_a`, `layout_b`, `layout_cd`: [optional] Attributes that identify this + operation as anchor for operands A, B, and the accumulator/result, enabling users to assign layouts + that govern distribution at the subgroup and/or lane level. Only valid at workgroup and subgroup + level. + + Example 1 (Workgroup level): + + ```mlir + %d = xegpu.dpas %a, %b, %c <{ + layout_a = #xegpu.layout, + layout_b = #xegpu.layout, + layout_cd = #xegpu.layout} + : vector<64x128xf16>, vector<128x128xf16>, vector<64x128xf32> -> vector<64x128xf32> + ``` + + Example 2 (Lane level): + + ```mlir + %d = xegpu.dpas %a, %b, %c + : vector<8xf16>, vector<16xf16>, vector<8xf32> -> vector<8xf32> + ``` }]; let arguments = (ins XeGPU_DpasOprType : $lhs, XeGPU_DpasOprType : $rhs, - Optional: $acc); + Optional: $acc, + OptionalAttr:$layout_a, + OptionalAttr:$layout_b, + OptionalAttr:$layout_cd + ); let results = (outs XeGPU_DpasResType: $result); let extraClassDeclaration = [{ @@ -1180,13 +1318,35 @@ def XeGPU_AtomicRMWOp: XeGPU_Op<"atomic_rmw", [Pure, has the same shape with `TensorDesc`, and is used to enable or disable specific data points of the `TensorDesc`. The `value` operand represents the new value to be applied during the modification. + + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + - `kind`: An attribute that specifies the atomic operation to be performed + (e.g., add, min, max, exchange, etc.). + + - `tensorDesc`: A `TensorDesc` describing the memory region on which the atomic + read-modify-write is performed. + + - `mask`: A predicate mask with the same shape as `tensorDesc`. Only elements + with a true (non-zero) mask value participate in the atomic operation; + masked-out elements are not modified. + + - `value`: The input values used by the atomic operation. It must have the same + shape and element type as `tensorDesc` and `result`. + + - `layout`: [optional] An attribute that identifies the operation as an anchor, + enabling users to assign a layout that governs distribution at the subgroup + and/or lane level. Only valid at workgroup and subgroup levels. }]; let arguments = (ins AtomicRMWKindAttr:$kind, XeGPU_TensorDesc:$tensorDesc, XeGPU_MaskType:$mask, - XeGPU_ValueType:$value); + XeGPU_ValueType:$value, + OptionalAttr:$layout); let results = (outs XeGPU_ValueType:$result); @@ -1264,10 +1424,29 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> { def XeGPU_ConvertLayoutOp: XeGPU_Op<"convert_layout", [Pure, AllTypesMatch<["source", "result"]>]> { let summary = "Convert the layout of the input operand"; let description = [{ - `convert_layout` redistribute data across subgroups and/or work-items from the `input_layout` to + `convert_layout` redistribute data across subgroups and/or lanes from the `input_layout` to the `target_layout`. Both `input_layout` and `target_layout` must correspond to the same programming - scope, such as workgroup-level (wg) or subgroup-level (sg) code. This operation is not valid once + scope, such as workgroup level (wg) or subgroup level (sg) code. This operation is not valid once the IR is lowered to WI level because that is the end result of all distributions. + + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + + Arguments: + - `source`: The input vector whose data is to be redistributed. The source and + result types must match. 
+ - `input_layout`: The layout attribute describing the current distribution of `source` + across subgroups and/or lanes. + - `target_layout`: The layout attribute describing the desired distribution of the result + across subgroups and/or lanes. + + Example (Subgroup level): + ```mlir + %coop_a = xegpu.convert_layout %a <{ + input_layout = #xegpu.layout, + target_layout = #xegpu.layout}> + : vector<128x128xf16> + ``` }]; let arguments = (ins XeGPU_VectorType: $source, DistributeLayoutAttr: $input_layout, @@ -1298,10 +1477,18 @@ def XeGPU_CreateMemDescOp: XeGPU_Op<"create_mem_desc", [Pure, as the underlying shared local memory. Arguments: - - `source` : 1D or 2D statically shape memref, representing the raw SLM buffer. - The provided memref must be contiguous. + - `source` : 1D or 2D statically shape memref, representing the raw SLM buffer. The provided memref must be contiguous. + Results: - `mem_desc` : the memory descriptor. + + Example: + ```mlir + %mdesc = xegpu.create_mem_desc %mref + : memref<4096xi8, 3> + -> !xegpu.mem_desc<32x64xf16, #xegpu.mem_layout> + ``` + }]; let arguments = (ins AnyTypeOf<[StaticShared1DMemRefOf<[XeGPU_ScalarType]>, StaticShared2DMemRefOf<[XeGPU_ScalarType]>]>:$source); let results = (outs XeGPU_MemDesc:$mem_desc); @@ -1327,17 +1514,30 @@ def XeGPU_LoadMatrixOp: XeGPU_Op<"load_matrix", [MemoryEffects<[MemRead]>, by the provided 2D `mem_desc`. Only 2D memory descriptors are supported; use the subview operation to obtain a compatible 2D `mem_desc` from a higher-rank descriptor if needed. + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + Arguments: - `mem_desc`: the memory descriptor identifying the SLM region. - `offsets`: the coordinates within the matrix to read from. - - `subgroup_block_io`: [optional] An attribute indicating that the operation can be - lowered to a subgroup block load. When this attribute is present, - the offsets are subgroup-uniform across all lanes. - - `layout`: [optional] An attribute for guiding distributions among - subgroups and/or work-items. It currently can accept either - LayoutAttr or SliceAttr. + - `subgroup_block_io`: [optional] An attribute indicating that the operation can be lowered + to a subgroup block load. When this attribute is present, the offsets are subgroup-uniform + across all lanes. Only used on subgroup and lane level. + - `layout`: [optional] Describes the expected layout of the `mem_desc` operand as well as + the result of load (they are identical). + Only valid at workgroup and subgroup levels. + Results: - `res`: the matrix elements loaded from SLM. + + Example (Workgroup level): + ```mlir + %c0 = arith.constant 0 : index + %1 = xegpu.load_matrix %0[%c0, %c0] <{ + layout = #xegpu.layout }> + : !xegpu.mem_desc<128x128xf16, #xegpu.mem_layout> + , index, index -> vector<128x128xf16> + ``` }]; let builders = [ @@ -1377,16 +1577,26 @@ def XeGPU_StoreMatrixOp: XeGPU_Op<"store_matrix", [MemoryEffects<[MemWrite]>, specified by a 2D `mem_desc`. Only 2D memory descriptors are supported; use the subview operation to obtain a 2D `mem_desc` from a higher-rank descriptor if needed. + This operation serves as an anchor through which users assign a layout attribute + to govern computation distribution. + Arguments: - `mem_desc`: the memory descriptor specifying the SLM region. - `offsets`: the coordinates within the matrix where the data will be written. - `data`: the values to be stored in the matrix. 
- - `subgroup_block_io`: [optional] An attribute indicating that the operation can be - lowered to a subgroup block store. When this attribute is present, - the offsets are subgroup-uniform across all lanes. - - `layout`: [optional] An attribute for guiding distributions among - subgroups and/or work-items. It currently can accept either - LayoutAttr or SliceAttr. + - `subgroup_block_io`: [optional] An attribute indicating that the operation can be lowered + to a subgroup block load. When this attribute is present, the offsets are subgroup-uniform + across all lanes. Only used on subgroup and lane level. + - `layout`: [optional] Describes the expected layout of the `tensor_desc` operand as well as + the value to be stored (they are identical). Only valid at workgroup and subgroup levels. + + Example (Workgroup level): + ```mlir + %c0 = arith.constant 0 : index + xegpu.store_matrix %1, %0[%c0, %c0] <{ + layout = #xegpu.layout }> + : vector<128x128xf16>, !xegpu.mem_desc<128x128xf16>>, index, index + ``` }]; let builders = [ OpBuilder<(ins "Value" : $data, "TypedValue": $mem_desc, diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp index 85c9a966f0fe8..8cb666298c959 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp @@ -465,7 +465,7 @@ void PrefetchNdOp::build(OpBuilder &builder, OperationState &state, xegpu::CachePolicyAttr l3_hint) { return build(builder, state, tensorDesc, ValueRange(), DenseI64ArrayAttr(), - l1_hint, l2_hint, l3_hint); + l1_hint, l2_hint, l3_hint, /*anchor_layout=*/nullptr); } void PrefetchNdOp::build(OpBuilder &builder, OperationState &state, @@ -480,7 +480,7 @@ void PrefetchNdOp::build(OpBuilder &builder, OperationState &state, auto staticOffsetsAttr = builder.getDenseI64ArrayAttr(staticOffsets); build(builder, state, tensorDesc, dynamicOffsets, staticOffsetsAttr, l1_hint, - l2_hint, l3_hint); + l2_hint, l3_hint, /*anchor_layout=*/nullptr); } LogicalResult PrefetchNdOp::verify() { @@ -519,7 +519,7 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType, return build(builder, state, retType, tensorDesc, ValueRange(), DenseI64ArrayAttr(), packed, transpose, l1_hint, l2_hint, - l3_hint); + l3_hint, /*anchor_layout=*/nullptr); } void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType, @@ -535,7 +535,8 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType, auto staticOffsetsAttr = builder.getDenseI64ArrayAttr(staticOffsets); build(builder, state, retType, tensorDesc, dynamicOffsets, staticOffsetsAttr, - packed, transpose, l1_hint, l2_hint, l3_hint); + packed, transpose, l1_hint, l2_hint, l3_hint, + /*anchor_layout=*/nullptr); } LogicalResult LoadNdOp::verify() { @@ -638,7 +639,8 @@ void StoreNdOp::build(OpBuilder &builder, OperationState &state, Value value, xegpu::CachePolicyAttr l3_hint) { return build(builder, state, value, tensorDesc, ValueRange(), - DenseI64ArrayAttr(), l1_hint, l2_hint, l3_hint); + DenseI64ArrayAttr(), l1_hint, l2_hint, l3_hint, + /*anchor_layout=*/nullptr); } void StoreNdOp::build(OpBuilder &builder, OperationState &state, Value value, @@ -653,7 +655,7 @@ void StoreNdOp::build(OpBuilder &builder, OperationState &state, Value value, auto staticOffsetsAttr = builder.getDenseI64ArrayAttr(staticOffsets); build(builder, state, value, tensorDesc, dynamicOffsets, staticOffsetsAttr, - l1_hint, l2_hint, l3_hint); + l1_hint, l2_hint, l3_hint, /*anchor_layout=*/nullptr); } LogicalResult StoreNdOp::verify() 
{ @@ -826,7 +828,7 @@ void PrefetchOp::build(OpBuilder &builder, OperationState &state, Value source, xegpu::CachePolicyAttr l2_hint, xegpu::CachePolicyAttr l3_hint) { build(builder, state, source, Value(), l1_hint, l2_hint, l3_hint, - IntegerAttr{}); + IntegerAttr{}, /*anchor_layout=*/nullptr); } //===----------------------------------------------------------------------===// @@ -876,7 +878,7 @@ void LoadGatherOp::build(OpBuilder &builder, OperationState &state, xegpu::CachePolicyAttr l2_hint, xegpu::CachePolicyAttr l3_hint) { build(builder, state, valueType, source, Value(), mask, IntegerAttr(), - l1_hint, l2_hint, l3_hint, /*layout=*/nullptr); + l1_hint, l2_hint, l3_hint, /*anchor_layout=*/nullptr); } void LoadGatherOp::build(OpBuilder &builder, OperationState &state, @@ -892,7 +894,7 @@ void LoadGatherOp::build(OpBuilder &builder, OperationState &state, auto offset = vector::FromElementsOp::create(builder, loc, type, values); build(builder, state, valueType, source, offset, mask, chunk_size, l1_hint, - l2_hint, l3_hint, /*layout=*/nullptr); + l2_hint, l3_hint, /*anchor_layout=*/nullptr); } void LoadGatherOp::build(OpBuilder &builder, OperationState &state, @@ -960,7 +962,7 @@ void StoreScatterOp::build(OpBuilder &builder, OperationState &state, xegpu::CachePolicyAttr l2_hint, xegpu::CachePolicyAttr l3_hint) { build(builder, state, value, dest, Value(), mask, IntegerAttr(), l1_hint, - l2_hint, l3_hint, /*layout=*/nullptr); + l2_hint, l3_hint, /*anchor_layout=*/nullptr); } void StoreScatterOp::build(OpBuilder &builder, OperationState &state, @@ -978,7 +980,7 @@ void StoreScatterOp::build(OpBuilder &builder, OperationState &state, // Call the correct builder overload that does not expect result types. build(builder, state, value, dest, offset, mask, chunk_size, l1_hint, l2_hint, - l3_hint, /*layout=*/nullptr); + l3_hint, /*anchor_layout=*/nullptr); } void StoreScatterOp::build( diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp index 6b3ba5a5981ce..f2b0e71c9397f 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUPropagateLayout.cpp @@ -387,6 +387,8 @@ class LayoutInfoPropagation ArrayRef operands, ArrayRef results); + bool hasParamsOfLayoutKind(xegpu::DistributeLayoutAttr anchorLayout); + public: LayoutInfoPropagation(DataFlowSolver &solver, SymbolTableCollection &symbolTable, @@ -475,48 +477,72 @@ LogicalResult LayoutInfoPropagation::visitOperation( return success(); } +bool LayoutInfoPropagation::hasParamsOfLayoutKind( + xegpu::DistributeLayoutAttr anchorLayout) { + if (anchorLayout == nullptr) { + return false; + } + if (layoutKind == LayoutKind::InstData) { + return !(anchorLayout.getEffectiveInstDataAsInt().empty()); + } else if (layoutKind == LayoutKind::Lane) { + return !(anchorLayout.getEffectiveLaneLayoutAsInt().empty() || + anchorLayout.getEffectiveLaneDataAsInt().empty()); + } + return false; +} + void LayoutInfoPropagation::visitPrefetchNdOp( xegpu::PrefetchNdOp prefetch, ArrayRef operands, ArrayRef results) { - // Here we assign the default layout to the tensor descriptor operand of - // prefetch. 
- auto tdescTy = prefetch.getTensorDescType(); - - auto uArch = getUArch(getChipStr(prefetch).value_or("")); - const auto *uArchInstruction = - dyn_cast( - uArch->getInstruction( - xegpu::uArch::InstructionKind::Subgroup2DBlockPrefetch)); - - auto blockWHC = - uArchInstruction->getBlockWidthHeightCount(tdescTy.getElementType()); - if (!blockWHC) - prefetch.emitWarning("No known block params found for the element type."); - auto [bWidth, bHeight, bCount] = blockWHC.value(); - SmallVector instData; - int instWidth = xegpu::getLargestDivisor( - static_cast(tdescTy.getDimSize(tdescTy.getRank() - 1)), bWidth); - if (instWidth == -1) - prefetch.emitWarning( - "No suitable instruction multiple found for the given shape."); - if (tdescTy.getRank() == 1) - instData = {instWidth}; - else { - int instHeight = xegpu::getLargestDivisor( - static_cast(tdescTy.getDimSize(tdescTy.getRank() - 2)), bHeight); - if (instHeight == -1) + + LayoutInfo prefetchLayout; + xegpu::DistributeLayoutAttr anchorLayout = prefetch.getLayoutAttr(); + if (hasParamsOfLayoutKind(anchorLayout)) { + prefetchLayout = LayoutInfo(anchorLayout); + } else { + // Here we assign the default layout to the tensor descriptor operand of + // prefetch. + auto tdescTy = prefetch.getTensorDescType(); + + auto uArch = getUArch(getChipStr(prefetch).value_or("")); + const auto *uArchInstruction = + dyn_cast( + uArch->getInstruction( + xegpu::uArch::InstructionKind::Subgroup2DBlockPrefetch)); + + auto blockWHC = + uArchInstruction->getBlockWidthHeightCount(tdescTy.getElementType()); + if (!blockWHC) + prefetch.emitWarning("No known block params found for the element type."); + auto [bWidth, bHeight, bCount] = blockWHC.value(); + SmallVector instData; + int instWidth = xegpu::getLargestDivisor( + static_cast(tdescTy.getDimSize(tdescTy.getRank() - 1)), bWidth, + bCount); + if (instWidth == -1) prefetch.emitWarning( "No suitable instruction multiple found for the given shape."); - instData = {instHeight, instWidth}; - } - LayoutInfo prefetchLayout; - if (layoutKind == LayoutKind::InstData) - prefetchLayout = - LayoutInfo(xegpu::LayoutAttr::get(tdescTy.getContext(), instData)); - else - prefetchLayout = getDefaultSIMTLayoutInfo( - tdescTy, uArch, uArchInstruction->getPackedFormatBitSize()); + if (tdescTy.getRank() == 1) + instData = {instWidth}; + else { + int instHeight = xegpu::getLargestDivisor( + static_cast(tdescTy.getDimSize(tdescTy.getRank() - 2)), bHeight); + if (instHeight == -1) + prefetch.emitWarning( + "No suitable instruction multiple found for the given shape."); + instData = {instHeight, instWidth}; + } + + if (layoutKind == LayoutKind::InstData) + prefetchLayout = + LayoutInfo(xegpu::LayoutAttr::get(tdescTy.getContext(), instData)); + else + prefetchLayout = getDefaultSIMTLayoutInfo( + tdescTy, uArch, uArchInstruction->getPackedFormatBitSize()); + prefetch.setLayoutAttr( + dyn_cast(prefetchLayout.get())); + } // Propagate the layout to the source tensor descriptor. 
propagateIfChanged(operands[0], operands[0]->meet(prefetchLayout)); } @@ -616,70 +642,97 @@ void LayoutInfoPropagation::visitUpdateNdOffsetOp( void LayoutInfoPropagation::visitDpasOp( xegpu::DpasOp dpas, ArrayRef operands, ArrayRef results) { - VectorType aTy = dpas.getLhsType(); - VectorType bTy = dpas.getRhsType(); - - auto uArch = getUArch(getChipStr(dpas).value_or("")); - const int subgroupSize = uArch->getSubgroupSize(); - const auto *uArchInstruction = - dyn_cast(uArch->getInstruction( - xegpu::uArch::InstructionKind::SubgroupMatrixMultiplyAcc)); - - const unsigned dataALen = aTy.getShape().front(); - auto supportedALen = uArchInstruction->getSupportedM(aTy.getElementType()); - const int maxALen = - xegpu::getLargestDivisor(dataALen, ArrayRef(supportedALen)); - if (maxALen == -1) - dpas.emitWarning( - "No suitable instruction multiple found for the given shape."); - - const unsigned dataBLen = bTy.getShape().back(); - auto supportedBLen = uArchInstruction->getSupportedK(bTy.getElementType()); - const int maxBLen = - xegpu::getLargestDivisor(dataBLen, ArrayRef(supportedBLen)); - if (maxBLen == -1) - dpas.emitWarning( - "No suitable instruction multiple found for the given shape."); - SmallVector instDataA = {maxALen, subgroupSize}; - SmallVector instDataB = {subgroupSize, maxBLen}; LayoutInfo dpasALayout; LayoutInfo dpasBLayout; - LayoutInfo dpasCLayout; + LayoutInfo dpasCDLayout; + + xegpu::DistributeLayoutAttr anchorLayoutCD = dpas.getLayoutCdAttr(); + if (hasParamsOfLayoutKind(anchorLayoutCD)) { + xegpu::DistributeLayoutAttr anchorLayoutA = dpas.getLayoutAAttr(); + xegpu::DistributeLayoutAttr anchorLayoutB = dpas.getLayoutBAttr(); + assert(hasParamsOfLayoutKind(anchorLayoutA) && + "Expected anchor layout for DPAS A operand."); + assert(hasParamsOfLayoutKind(anchorLayoutB) && + "Expected anchor layout for DPAS B operand."); + dpasALayout = LayoutInfo(anchorLayoutA); + dpasBLayout = LayoutInfo(anchorLayoutB); + dpasCDLayout = LayoutInfo(anchorLayoutCD); - if (layoutKind == LayoutKind::InstData) { - dpasALayout = - LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataA)); - dpasBLayout = - LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataB)); } else { - dpasALayout = getSIMTLayoutInfoForDPASOperand( - aTy, 0, uArch, uArchInstruction->getPackedFormatBitSizeA()); - dpasBLayout = getSIMTLayoutInfoForDPASOperand( - bTy, 1, uArch, uArchInstruction->getPackedFormatBitSizeB()); - } - propagateIfChanged(operands[0], operands[0]->meet(dpasALayout)); - propagateIfChanged(operands[1], operands[1]->meet(dpasBLayout)); - if (operands.size() > 2) { - VectorType cTy = dpas.getAccType(); - const unsigned dataCLen = bTy.getShape().back(); - auto supportedCLen = uArchInstruction->getSupportedN(bTy.getElementType()); - const int maxCLen = - xegpu::getLargestDivisor(dataCLen, ArrayRef(supportedCLen)); - if (maxCLen == -1) + VectorType aTy = dpas.getLhsType(); + VectorType bTy = dpas.getRhsType(); + + auto uArch = getUArch(getChipStr(dpas).value_or("")); + const int subgroupSize = uArch->getSubgroupSize(); + const auto *uArchInstruction = + dyn_cast(uArch->getInstruction( + xegpu::uArch::InstructionKind::SubgroupMatrixMultiplyAcc)); + + const unsigned dataALen = aTy.getShape().front(); + auto supportedALen = uArchInstruction->getSupportedM(aTy.getElementType()); + const int maxALen = + xegpu::getLargestDivisor(dataALen, ArrayRef(supportedALen)); + if (maxALen == -1) dpas.emitWarning( "No suitable instruction multiple found for the given shape."); - SmallVector instDataC = {maxALen, 
maxCLen}; - if (layoutKind == LayoutKind::InstData) - dpasCLayout = - LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataC)); - else - dpasCLayout = getSIMTLayoutInfoForDPASOperand( - cTy, 2, uArch, uArchInstruction->getPackedFormatBitSizeB()); + const unsigned dataBLen = bTy.getShape().back(); + auto supportedBLen = uArchInstruction->getSupportedN(bTy.getElementType()); + + const int maxBLen = + xegpu::getLargestDivisor(dataBLen, ArrayRef(supportedBLen)); + + if (maxBLen == -1) + dpas.emitWarning( + "No suitable instruction multiple found for the given shape."); + SmallVector instDataA = {maxALen, subgroupSize}; + SmallVector instDataB = {subgroupSize, maxBLen}; - propagateIfChanged(operands[2], operands[2]->meet(dpasCLayout)); + if (layoutKind == LayoutKind::InstData) { + dpasALayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataA)); + dpasBLayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataB)); + } else { + dpasALayout = getSIMTLayoutInfoForDPASOperand( + aTy, 0, uArch, uArchInstruction->getPackedFormatBitSizeA()); + dpasBLayout = getSIMTLayoutInfoForDPASOperand( + bTy, 1, uArch, uArchInstruction->getPackedFormatBitSizeB()); + } + + if (operands.size() > 2) { + VectorType cTy = dpas.getAccType(); + if (layoutKind == LayoutKind::InstData) { + const unsigned dataCLen = bTy.getShape().back(); + auto supportedCLen = + uArchInstruction->getSupportedN(bTy.getElementType()); + const int maxCLen = xegpu::getLargestDivisor( + dataCLen, ArrayRef(supportedCLen)); + if (maxCLen == -1) + dpas.emitWarning( + "No suitable instruction multiple found for the given shape."); + SmallVector instDataC = {maxALen, maxCLen}; + dpasCDLayout = + LayoutInfo(xegpu::LayoutAttr::get(dpas.getContext(), instDataC)); + } else + dpasCDLayout = getSIMTLayoutInfoForDPASOperand( + cTy, 2, uArch, uArchInstruction->getPackedFormatBitSizeB()); + + dpas.setLayoutCdAttr( + dyn_cast(dpasCDLayout.get())); + } + dpas.setLayoutAAttr( + dyn_cast(dpasALayout.get())); + dpas.setLayoutBAttr( + dyn_cast(dpasBLayout.get())); + } + + propagateIfChanged(operands[0], operands[0]->meet(dpasALayout)); + propagateIfChanged(operands[1], operands[1]->meet(dpasBLayout)); + if (operands.size() > 2) { + propagateIfChanged(operands[2], operands[2]->meet(dpasCDLayout)); } } @@ -688,42 +741,51 @@ void LayoutInfoPropagation::visitStoreNdOp( xegpu::StoreNdOp store, ArrayRef operands, ArrayRef results) { - auto uArch = getUArch(getChipStr(store).value_or("")); - const auto *uArchInstruction = - dyn_cast( - uArch->getInstruction( - xegpu::uArch::InstructionKind::Subgroup2DBlockStore)); - VectorType dataTy = store.getValueType(); - auto blockWHC = uArchInstruction->getBlockWidthHeightCount( - store.getValueType().getElementType()); - if (!blockWHC) - store.emitWarning("No known block params found for the element type."); - auto [bWidth, bHeight, bCount] = blockWHC.value(); - SmallVector instData; - int instWidth = xegpu::getLargestDivisor( - static_cast(dataTy.getDimSize(dataTy.getRank() - 1)), bWidth); - if (instWidth == -1) - store.emitWarning( - "No suitable instruction multiple found for the given shape."); - if (dataTy.getRank() == 1) - instData = {instWidth}; - else { - int instHeight = xegpu::getLargestDivisor( - static_cast(dataTy.getDimSize(dataTy.getRank() - 2)), bHeight); - if (instHeight == -1) + LayoutInfo storeLayout; + xegpu::DistributeLayoutAttr anchorLayout = store.getLayoutAttr(); + if (hasParamsOfLayoutKind(anchorLayout)) { + storeLayout = LayoutInfo(anchorLayout); + } else { + auto 
uArch = getUArch(getChipStr(store).value_or("")); + const auto *uArchInstruction = + dyn_cast( + uArch->getInstruction( + xegpu::uArch::InstructionKind::Subgroup2DBlockStore)); + VectorType dataTy = store.getValueType(); + auto blockWHC = uArchInstruction->getBlockWidthHeightCount( + store.getValueType().getElementType()); + if (!blockWHC) + store.emitWarning("No known block params found for the element type."); + auto [bWidth, bHeight, bCount] = blockWHC.value(); + SmallVector instData; + int instWidth = xegpu::getLargestDivisor( + static_cast(dataTy.getDimSize(dataTy.getRank() - 1)), bWidth, + bCount); + if (instWidth == -1) store.emitWarning( "No suitable instruction multiple found for the given shape."); - instData = {instHeight, instWidth}; - } + if (dataTy.getRank() == 1) + instData = {instWidth}; + else { + int instHeight = xegpu::getLargestDivisor( + static_cast(dataTy.getDimSize(dataTy.getRank() - 2)), bHeight); + if (instHeight == -1) + store.emitWarning( + "No suitable instruction multiple found for the given shape."); + instData = {instHeight, instWidth}; + } - LayoutInfo storeLayout; - if (layoutKind == LayoutKind::InstData) - storeLayout = - LayoutInfo(xegpu::LayoutAttr::get(dataTy.getContext(), instData)); - else - storeLayout = - getDefaultSIMTLayoutInfo(store.getValueType(), uArch, - uArchInstruction->getPackedFormatBitSize()); + if (layoutKind == LayoutKind::InstData) + storeLayout = + LayoutInfo(xegpu::LayoutAttr::get(dataTy.getContext(), instData)); + else + storeLayout = + getDefaultSIMTLayoutInfo(store.getValueType(), uArch, + uArchInstruction->getPackedFormatBitSize()); + store.setLayoutAttr( + dyn_cast(storeLayout.get())); + } + // Propagate the layout to the value operand. // Both operands should have the same layout for (LayoutInfoLattice *operand : operands) propagateIfChanged(operand, operand->meet(storeLayout)); @@ -734,21 +796,30 @@ void LayoutInfoPropagation::visitStoreNdOp( void LayoutInfoPropagation::visitLoadNdOp( xegpu::LoadNdOp load, ArrayRef operands, ArrayRef results) { - LayoutInfo valueLayout = results[0]->getValue(); - // Need the layout of the value to propagate to the tensor descriptor. - if (!valueLayout.isAssigned()) - return; - LayoutInfo tensorDescLayout = valueLayout; - // LoadNdOp has the transpose effect. However, at the stage of this analysis - // this effect is not expected and should be abstracted away. Emit a - // warning. - if (auto transpose = load.getTranspose()) { - load.emitWarning("Transpose effect is not expected for LoadNdOp at " - "LayoutInfoPropagation stage."); - tensorDescLayout = valueLayout.transpose(transpose.value()); + + LayoutInfo loadLayout; + xegpu::DistributeLayoutAttr anchorLayout = load.getLayoutAttr(); + if (hasParamsOfLayoutKind(anchorLayout)) { + loadLayout = LayoutInfo(anchorLayout); + } else { + + LayoutInfo valueLayout = results[0]->getValue(); + // Need the layout of the value to propagate to the tensor descriptor. + if (!valueLayout.isAssigned()) + return; + loadLayout = valueLayout; + // LoadNdOp has the transpose effect. However, at the stage of this analysis + // this effect is not expected and should be abstracted away. Emit a + // warning. + if (auto transpose = load.getTranspose()) { + load.emitWarning("Transpose effect is not expected for LoadNdOp at " + "LayoutInfoPropagation stage."); + loadLayout = valueLayout.transpose(transpose.value()); + } + load.setLayoutAttr(dyn_cast(loadLayout.get())); } // Propagate the new layout to the tensor descriptor operand. 
- propagateIfChanged(operands[0], operands[0]->meet(tensorDescLayout)); + propagateIfChanged(operands[0], operands[0]->meet(loadLayout)); } /// For vector::TransposeOp, the layout of the result is transposed and @@ -838,37 +909,48 @@ void LayoutInfoPropagation::visitVectorBitcastOp( void LayoutInfoPropagation::visitLoadGatherOp( xegpu::LoadGatherOp load, ArrayRef operands, ArrayRef results) { - // The layout is strictly determined by the payload type. - auto payloadTy = dyn_cast(load.getValueType()); - if (!payloadTy) { - load.emitWarning("Not propagating, non-vector payload supplied."); - return; - } - auto uArch = getUArch(getChipStr(load).value_or("")); - const int subgroupSize = uArch->getSubgroupSize(); - SmallVector instData{subgroupSize}; - if (auto chunkSize = load.getChunkSize().value_or(0); chunkSize > 1) - instData.push_back(chunkSize); - else if (auto srcTdescTy = - dyn_cast(load.getSourceType())) { - if (srcTdescTy.getChunkSizeAsInt() > 1) + + LayoutInfo loadLayout; + LayoutInfo maskLayout; + xegpu::DistributeLayoutAttr anchorLayout = load.getLayoutAttr(); + if (hasParamsOfLayoutKind(anchorLayout)) { + loadLayout = LayoutInfo(anchorLayout); + maskLayout = loadLayout; + } else { + + // The layout is strictly determined by the payload type. + auto payloadTy = dyn_cast(load.getValueType()); + if (!payloadTy) { + load.emitWarning("Not propagating, non-vector payload supplied."); + return; + } + auto uArch = getUArch(getChipStr(load).value_or("")); + const int subgroupSize = uArch->getSubgroupSize(); + SmallVector instData{subgroupSize}; + if (auto chunkSize = load.getChunkSize().value_or(0); chunkSize > 1) instData.push_back(chunkSize); - } - LayoutInfo layout; - if (layoutKind == LayoutKind::InstData) - layout = LayoutInfo(xegpu::LayoutAttr::get(load.getContext(), instData)); - else - layout = getDefaultSIMTLayoutInfo(payloadTy, uArch, - uArch->getGeneralPackedFormatBitSize(), - /*scattered*/ true); - - // Mask operand should have 1D default layout. - LayoutInfo maskLayout = - getDefaultSIMTLayoutInfo(load->getContext(), 1, subgroupSize); + else if (auto srcTdescTy = + dyn_cast(load.getSourceType())) { + if (srcTdescTy.getChunkSizeAsInt() > 1) + instData.push_back(chunkSize); + } + + if (layoutKind == LayoutKind::InstData) + loadLayout = + LayoutInfo(xegpu::LayoutAttr::get(load.getContext(), instData)); + else + loadLayout = getDefaultSIMTLayoutInfo( + payloadTy, uArch, uArch->getGeneralPackedFormatBitSize(), + /*scattered*/ true); + + // Mask operand should have 1D default layout. + maskLayout = getDefaultSIMTLayoutInfo(load->getContext(), 1, subgroupSize); + load.setLayoutAttr(dyn_cast(loadLayout.get())); + } // Propagate the new layout to the tensor descriptor operand. if (isa(load.getSourceType())) - propagateIfChanged(operands[0], operands[0]->meet(layout)); + propagateIfChanged(operands[0], operands[0]->meet(loadLayout)); // Propagate the new layout to the mask and optional offset operand. propagateIfChanged(operands[1], operands[1]->meet(maskLayout)); if (load.getOffsets()) @@ -896,21 +978,26 @@ void LayoutInfoPropagation::visitCreateDescOp( void LayoutInfoPropagation::visitStoreScatterOp( xegpu::StoreScatterOp storeScatter, ArrayRef operands, ArrayRef results) { - // Currently, for 2D StoreScatterOp we expect that the height dimension of - // the tensor descriptor is equal to the subgroup size. This is ensured by - // the op verifier. 
- auto payloadTy = dyn_cast(storeScatter.getValueType()); - if (!payloadTy) { - storeScatter.emitWarning("Not propagating, non-vector payload supplied."); - return; - } - LayoutInfo payloadLayout; - auto uArch = getUArch(getChipStr(storeScatter).value_or("")); - const int subgroupSize = uArch->getSubgroupSize(); - if (auto layout = storeScatter.getLayoutAttr()) { - payloadLayout = LayoutInfo(layout); + LayoutInfo payloadLayout; + LayoutInfo maskLayout; + xegpu::DistributeLayoutAttr anchorLayout = storeScatter.getLayoutAttr(); + if (hasParamsOfLayoutKind(anchorLayout)) { + payloadLayout = LayoutInfo(anchorLayout); + maskLayout = payloadLayout; } else { + // Currently, for 2D StoreScatterOp we expect that the height dimension of + // the tensor descriptor is equal to the subgroup size. This is ensured by + // the op verifier. + auto payloadTy = dyn_cast(storeScatter.getValueType()); + if (!payloadTy) { + storeScatter.emitWarning("Not propagating, non-vector payload supplied."); + return; + } + + auto uArch = getUArch(getChipStr(storeScatter).value_or("")); + const int subgroupSize = uArch->getSubgroupSize(); + if (layoutKind == LayoutKind::InstData) { SmallVector instData{subgroupSize}; if (auto chunkSize = storeScatter.getChunkSize().value_or(0); @@ -934,10 +1021,13 @@ void LayoutInfoPropagation::visitStoreScatterOp( payloadTy, uArch, uArch->getGeneralPackedFormatBitSize(), /*scattered=*/true); } - } - LayoutInfo maskLayout = - getDefaultSIMTLayoutInfo(storeScatter->getContext(), 1, subgroupSize); + maskLayout = + getDefaultSIMTLayoutInfo(storeScatter->getContext(), 1, subgroupSize); + + storeScatter.setLayoutAttr( + dyn_cast(payloadLayout.get())); + } // Propagate the payload operand layout propagateIfChanged(operands[0], operands[0]->meet(payloadLayout)); // Propagate the destination (if tdesc) operand layout diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp index beb9b60aa9d7a..48bd0662b03ff 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp @@ -86,8 +86,16 @@ genOffsetsList(ConversionPatternRewriter &rewriter, OpType op, if (origOffsets.empty()) return failure(); + // if op is xegpu::CreateNdDescOp, call op.getDescLayoutAttr() + xegpu::DistributeLayoutAttr layout; + if constexpr (std::is_same_v || + std::is_same_v) { + layout = op.getLayoutAttr(); + } else { + layout = op.getDescLayoutAttr(); + } + // not applicable to ops without workgroup layout attributes - xegpu::DistributeLayoutAttr layout = op.getLayoutAttr(); if (!layout || !layout.isForWorkgroup()) return failure(); @@ -190,7 +198,7 @@ struct WgToSgCreateNdOp : public OpConversionPattern { xegpu::TensorDescType tdescTy = op.getType(); ArrayRef wgShape = tdescTy.getShape(); Type elemTy = tdescTy.getElementType(); - xegpu::DistributeLayoutAttr layout = op.getLayoutAttr(); + xegpu::DistributeLayoutAttr layout = tdescTy.getLayoutAttr(); SmallVector sgShape = getSgShapeAndCount(wgShape, layout).first; auto newTdescTy = xegpu::TensorDescType::get(ctx, sgShape, elemTy, tdescTy.getEncoding(), diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp index b0905c4e9203b..91432b1c11304 100644 --- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp +++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp @@ -140,7 +140,6 @@ xegpu::DistributeLayoutAttr xegpu::getDistributeLayoutAttr(const Value value) { // for StoreMatrixOp, the layout is 
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
index beb9b60aa9d7a..48bd0662b03ff 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp
@@ -86,8 +86,16 @@ genOffsetsList(ConversionPatternRewriter &rewriter, OpType op,
   if (origOffsets.empty())
     return failure();

+  // if op is xegpu::CreateNdDescOp, call op.getDescLayoutAttr()
+  xegpu::DistributeLayoutAttr layout;
+  if constexpr (std::is_same_v ||
+                std::is_same_v) {
+    layout = op.getLayoutAttr();
+  } else {
+    layout = op.getDescLayoutAttr();
+  }
+
   // not applicable to ops without workgroup layout attributes
-  xegpu::DistributeLayoutAttr layout = op.getLayoutAttr();
   if (!layout || !layout.isForWorkgroup())
     return failure();

@@ -190,7 +198,7 @@ struct WgToSgCreateNdOp : public OpConversionPattern {
     xegpu::TensorDescType tdescTy = op.getType();
     ArrayRef wgShape = tdescTy.getShape();
     Type elemTy = tdescTy.getElementType();
-    xegpu::DistributeLayoutAttr layout = op.getLayoutAttr();
+    xegpu::DistributeLayoutAttr layout = tdescTy.getLayoutAttr();
     SmallVector sgShape = getSgShapeAndCount(wgShape, layout).first;
     auto newTdescTy =
         xegpu::TensorDescType::get(ctx, sgShape, elemTy, tdescTy.getEncoding(),
diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
index b0905c4e9203b..91432b1c11304 100644
--- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
+++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
@@ -140,7 +140,6 @@ xegpu::DistributeLayoutAttr xegpu::getDistributeLayoutAttr(const Value value) {
     // for StoreMatrixOp, the layout is attached to the property of the op
     if (auto storeOp = dyn_cast(defOp))
       return storeOp.getLayoutAttr();
-
     std::string layoutName = getLayoutName(result);
     if (defOp->hasAttr(layoutName))
       return defOp->getAttrOfType(layoutName);
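As a sketch of the two places a distribution layout can now live, which is what the dispatch in `genOffsetsList` above distinguishes: a layout carried by the `tensor_desc` type (what `getDescLayoutAttr()` and `tdescTy.getLayoutAttr()` return) versus a layout attached to the consuming op as its inherent `layout` attribute (what `getLayoutAttr()` returns). The function name and the `sg_layout`/`sg_data` values below are illustrative assumptions.

```mlir
func.func @wg_layout_sketch(%src: memref<128x128xf16>) {
  // Layout carried by the tensor_desc *type*.
  %tdesc = xegpu.create_nd_tdesc %src : memref<128x128xf16>
      -> !xegpu.tensor_desc<128x128xf16, #xegpu.layout<sg_layout = [4, 8], sg_data = [32, 16]>>
  // Layout carried by the *op* itself as its inherent `layout` attribute.
  %v = xegpu.load_nd %tdesc <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [32, 16]>}>
      : !xegpu.tensor_desc<128x128xf16, #xegpu.layout<sg_layout = [4, 8], sg_data = [32, 16]>>
      -> vector<128x128xf16>
  xegpu.store_nd %v, %tdesc <{layout = #xegpu.layout<sg_layout = [4, 8], sg_data = [32, 16]>}>
      : vector<128x128xf16>, !xegpu.tensor_desc<128x128xf16, #xegpu.layout<sg_layout = [4, 8], sg_data = [32, 16]>>
  return
}
```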
diff --git a/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir b/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir
index 0c837e17a0afa..d911baa49acbb 100644
--- a/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir
+++ b/mlir/test/Dialect/XeGPU/propagate-layout-inst-data.mlir
@@ -6,9 +6,9 @@
 // CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<8x16xf32>
 // CHECK: %[[TDESC_SRC:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<8x32xf32> -> !xegpu.tensor_desc<8x32xf32, #xegpu.layout>
 // CHECK: %[[TDESC_DST:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<8x32xf32> -> !xegpu.tensor_desc<8x32xf32, #xegpu.layout>
-// CHECK: %[[LOADED:.*]] = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[LOADED:.*]] = xegpu.load_nd %0 <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<8x32xf32, #xegpu.layout> -> vector<8x32xf32>
-// CHECK: xegpu.store_nd %[[LOADED]], %[[TDESC_DST]] : vector<8x32xf32>, !xegpu.tensor_desc<8x32xf32, #xegpu.layout>
+// CHECK: xegpu.store_nd %[[LOADED]], %[[TDESC_DST]] <{layout = #xegpu.layout}> : vector<8x32xf32>, !xegpu.tensor_desc<8x32xf32, #xegpu.layout>
 gpu.module @test {
 // Although the uArch allows 8x32 inst data using block count (or array_len),
 // it is up to optimization passes to decide on the block count usage.
@@ -29,14 +29,14 @@ func.func @load_store_no_array_len(%arg0: memref<8x32xf32>, %arg1: memref<8x32xf
 // CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0.000000e+00> : vector<8x16xf32>
 // CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout
 // CHECK: %[[T1:.*]] = xegpu.create_nd_tdesc %[[ARG1]][{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
-// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16>
-// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
-// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_a = #xegpu.layout, layout_b = #xegpu.layout, layout_cd = #xegpu.layout, layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
 // CHECK: %[[T5:.*]] = xegpu.create_nd_tdesc %[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout
-// CHECK: xegpu.store_nd %[[T4]], %[[T5]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
+// CHECK: xegpu.store_nd %[[T4]], %[[T5]] <{layout = #xegpu.layout}> : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
 gpu.module @test {
 func.func @dpas_f16(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) {
@@ -70,7 +70,7 @@ gpu.module @test_kernel {
   %out:3 = scf.for %k = %c0 to %c1024 step %c32 iter_args(%arg0 = %a_tdesc, %arg1 = %b_tdesc, %arg2 = %c_tdesc)
       -> (!xegpu.tensor_desc<16x32xf16>, !xegpu.tensor_desc<16x32xf16>, !xegpu.tensor_desc<16x32xf16>) {
-    //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} :
+    //CHECK: xegpu.load_nd {{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
     //CHECK-SAME: !xegpu.tensor_desc<16x32xf16, #xegpu.layout> -> vector<16x32xf16>
     %a = xegpu.load_nd %arg0 : !xegpu.tensor_desc<16x32xf16> -> vector<16x32xf16>
     %b = xegpu.load_nd %arg1 : !xegpu.tensor_desc<16x32xf16> -> vector<16x32xf16>
@@ -109,7 +109,7 @@ gpu.module @test_kernel {
   %out:3 = scf.for %k = %c0 to %c1024 step %c32 iter_args(%arg0 = %a_tdesc, %arg1 = %b_tdesc, %arg2 = %c_tdesc)
       -> (!xegpu.tensor_desc<12x32xf16>, !xegpu.tensor_desc<12x32xf16>, !xegpu.tensor_desc<12x32xf16>) {
-    //CHECK: xegpu.load_nd {{.*}} {layout_result_0 = #xegpu.layout} :
+    //CHECK: xegpu.load_nd {{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
     //CHECK-SAME: !xegpu.tensor_desc<12x32xf16, #xegpu.layout> -> vector<12x32xf16>
     %a = xegpu.load_nd %arg0 : !xegpu.tensor_desc<12x32xf16> -> vector<12x32xf16>
     %b = xegpu.load_nd %arg1 : !xegpu.tensor_desc<12x32xf16> -> vector<12x32xf16>
@@ -137,9 +137,9 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
 // CHECK: %{{.*}} = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
 // CHECK: %{{.*}} = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
-// CHECK: %{{.*}} = xegpu.load %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64}>
+// CHECK: %{{.*}} = xegpu.load %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64, layout = #xegpu.layout}>
 // CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
-// CHECK: xegpu.store %0, %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
+// CHECK: xegpu.store %0, %[[ARG0]][%{{.*}}], %{{.*}} <{chunk_size = 8 : i64, layout = #xegpu.layout}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops_chunksize(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
   %offset = arith.constant dense<12> : vector<16xindex>
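The test expectation change above is largely syntactic: the propagated layout, previously recorded only as a discardable `layout_result_0` attribute, now also appears as the op's inherent `layout` attribute (printed inside `<{...}>`). A hypothetical before/after for a single load, with an assumed `inst_data = [8, 16]` parameter list and an illustrative function name:

```mlir
func.func @layout_attr_before_after(%src: memref<8x32xf32>) {
  %tdesc = xegpu.create_nd_tdesc %src : memref<8x32xf32> -> !xegpu.tensor_desc<8x32xf32>
  // Before: the propagated layout lived only in a discardable attribute.
  %v0 = xegpu.load_nd %tdesc {layout_result_0 = #xegpu.layout<inst_data = [8, 16]>}
      : !xegpu.tensor_desc<8x32xf32> -> vector<8x32xf32>
  // After: the same layout is also anchored as the inherent `layout` attribute.
  %v1 = xegpu.load_nd %tdesc <{layout = #xegpu.layout<inst_data = [8, 16]>}>
      {layout_result_0 = #xegpu.layout<inst_data = [8, 16]>}
      : !xegpu.tensor_desc<8x32xf32> -> vector<8x32xf32>
  return
}
```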
diff --git a/mlir/test/Dialect/XeGPU/propagate-layout.mlir b/mlir/test/Dialect/XeGPU/propagate-layout.mlir
index eb004932af4be..f8b59b87a122b 100644
--- a/mlir/test/Dialect/XeGPU/propagate-layout.mlir
+++ b/mlir/test/Dialect/XeGPU/propagate-layout.mlir
@@ -6,14 +6,14 @@ gpu.module @test {
 // CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0.000000e+00> : vector<8x16xf32>
 // CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout>
 // CHECK: %[[T1:.*]] = xegpu.create_nd_tdesc %[[ARG1]][{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
-// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T2:.*]] = xegpu.load_nd %[[T0]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16>
-// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
-// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T4:.*]] = xegpu.dpas %[[T2]], %[[T3]], %[[CST]] {layout_a = #xegpu.layout, layout_b = #xegpu.layout, layout_cd = #xegpu.layout, layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
 // CHECK: %[[T5:.*]] = xegpu.create_nd_tdesc %[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
-// CHECK: xegpu.store_nd %[[T4]], %[[T5]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
+// CHECK: xegpu.store_nd %[[T4]], %[[T5]] <{layout = #xegpu.layout}> : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
 func.func @dpas_f16(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) {
   %c0 = arith.constant 0 : index
   %cst = arith.constant dense<0.000000e+00> : vector<8x16xf32>
@@ -32,7 +32,8 @@ func.func @dpas_f16(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: me
 gpu.module @test {
 // CHECK-LABEL: func.func @dpas_i8(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: vector<8x32xi8>, %[[ARG1:[0-9a-zA-Z]+]]: vector<32x16xi8>, %[[ARG2:[0-9a-zA-Z]+]]: memref<8x16xi32>) {
-// CHECK: %[[T0:.*]] = xegpu.dpas %[[ARG0]], %[[ARG1]] {layout_result_0 = #xegpu.layout}
+// CHECK: %[[T0:.*]] = xegpu.dpas %[[ARG0]], %[[ARG1]] {layout_a = #xegpu.layout, layout_b = #xegpu.layout, layout_result_0 = #xegpu.layout}
+
 func.func @dpas_i8(%arg0: vector<8x32xi8>, %arg1: vector<32x16xi8>, %arg2: memref<8x16xi32>) {
   %c0 = arith.constant 0 : index
   %0 = xegpu.dpas %arg0, %arg1 : vector<8x32xi8>, vector<32x16xi8> -> vector<8x16xi32>
@@ -46,8 +47,8 @@ func.func @dpas_i8(%arg0: vector<8x32xi8>, %arg1: vector<32x16xi8>, %arg2: memre
 gpu.module @test {
 // CHECK-LABEL: func.func @load_with_transpose_effect(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf16>, %[[ARG0:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf32>) {
-// CHECK: %{{.*}} = xegpu.load_nd %{{.*}} <{transpose = array}> {layout_result_0 = #xegpu.layout} :
-// CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
+// CHECK: %{{.*}} = xegpu.load_nd %{{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
+// CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16>
 func.func @load_with_transpose_effect(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) {
   %c0 = arith.constant 0 : index
   %cst = arith.constant dense<0.000000e+00> : vector<8x16xf32>
@@ -108,7 +109,7 @@ gpu.module @test {
 // CHECK-NEXT: %[[CST0:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
 // CHECK-NEXT: %[[T2:.*]] = xegpu.create_tdesc %[[ARG1]], %[[CST]] : memref<256xf16>, vector<16xindex> ->
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.scatter_tdesc_attr, #xegpu.layout>
-// CHECK-NEXT: %{{.*}} = xegpu.load %[[T2]], %[[CST0]] {layout_result_0 = #xegpu.layout}
+// CHECK-NEXT: %{{.*}} = xegpu.load %[[T2]], %[[CST0]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.scatter_tdesc_attr, #xegpu.layout>, vector<16xi1> -> vector<16x16xf16>
 func.func @load_gather_with_chunksize(%arg0: memref<8x16xf16>, %arg1: memref<256xf16>, %arg2: memref<8x16xf32>) {
   %c0 = arith.constant 0 : index
@@ -135,7 +136,7 @@ gpu.module @test {
 // CHECK-NEXT: %[[CST0:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
 // CHECK-NEXT: %[[T0:.*]] = xegpu.create_tdesc %[[ARG0]], %[[CST]] : memref<256xf32>, vector<16xindex> ->
 // CHECK-SAME: !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>, #xegpu.layout>
-// CHECK-NEXT: %{{.*}} = xegpu.load %[[T0]], %[[CST0]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %{{.*}} = xegpu.load %[[T0]], %[[CST0]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>, #xegpu.layout>, vector<16xi1> -> vector<16xf32>
 func.func @load_gather_1d(%arg0: memref<256xf32>, %arg1: !xegpu.tensor_desc<16xf32>) {
   %cst = arith.constant dense<[0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240]> : vector<16xindex>
@@ -183,9 +184,9 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
 // CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
 // CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
-// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64}>
+// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64, layout = #xegpu.layout}>
 // CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
-// CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
+// CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64, layout = #xegpu.layout}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops_chunksize(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
   %offset = arith.constant dense<12> : vector<16xindex>
@@ -204,7 +205,7 @@ gpu.module @test {
 // CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
 // CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
 // CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
-// CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
+// CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{layout = #xegpu.layout}> : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
   %offset = arith.constant dense<12> : vector<16xindex>
@@ -217,13 +218,13 @@ func.func @scatter_ops(%src: memref<256xf16>) {
 gpu.module @test {
 // CHECK-LABEL: func.func @scatter_ops_custom_perm_layout(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
-// CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
-// CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
+// CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
+// CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
 // CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
-// CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
+// CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
 // CHECK: %[[ADD_RES:.*]] = arith.addf %[[LOAD_VEC]], %[[LOAD_VEC]] {layout_result_0 = #xegpu.layout} : vector<16xf16>
 // CHECK: xegpu.store %[[ADD_RES]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
-// CHECK-SAME <{layout = #xegpu.layout}> : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
+// CHECK-SAME <{layout = #xegpu.layout}> : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops_custom_perm_layout(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
   %offset = arith.constant dense<12> : vector<16xindex>
@@ -237,9 +238,9 @@ func.func @scatter_ops_custom_perm_layout(%src: memref<256xf16>) {
 gpu.module @test {
 // CHECK-LABEL: func.func @scatter_ops_preserve_load_perm_layout(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
-// CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
-// CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
-// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{layout = #xegpu.layout}>
+// CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<16xi1>
+// CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<12> : vector<16xindex>
+// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
 // CHECK-SAME: {layout_result_0 = #xegpu.layout} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
 // CHECK: %[[ADD_RES:.*]] = arith.addf %[[LOAD_VEC]], %[[LOAD_VEC]] {layout_result_0 = #xegpu.layout} : vector<16xf16>
 // CHECK: xegpu.store %[[ADD_RES]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
@@ -256,9 +257,9 @@ func.func @scatter_ops_preserve_load_perm_layout(%src: memref<256xf16>) {
 // -----
 gpu.module @test {
 // CHECK-LABEL: func.func @vector_bitcast_i16_to_f16(
-// CHECK: %[[LOAD0:.*]] = xegpu.load_nd %{{.*}} {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD0:.*]] = xegpu.load_nd %{{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<8x16xi16, #xegpu.layout> -> vector<8x16xi16>
-// CHECK: %[[LOAD1:.*]] = xegpu.load_nd %{{.*}} {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD1:.*]] = xegpu.load_nd %{{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<16x16xi16, #xegpu.layout> -> vector<16x16xi16>
 // CHECK: %{{.*}} = vector.bitcast %[[LOAD0]] {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: vector<8x16xi16> to vector<8x16xf16>
@@ -281,7 +282,7 @@ func.func @vector_bitcast_i16_to_f16(%arg0: memref<8x16xi16>, %arg1: memref<16x1
 // -----
 gpu.module @test {
 // CHECK-LABEL: func.func @vector_bitcast_i32_to_f16(
-// CHECK: %[[LOAD:.*]] = xegpu.load_nd %{{.*}} {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD:.*]] = xegpu.load_nd %{{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<16x8xi32, #xegpu.layout> -> vector<16x8xi32>
 // CHECK-NEXT: %{{.*}} = vector.bitcast %[[LOAD]] {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: vector<16x8xi32> to vector<16x16xf16>
@@ -302,7 +303,7 @@ func.func @vector_bitcast_i32_to_f16(%arg0: memref<8x16xf16>, %arg1: memref<16x8
 gpu.module @test {
 // CHECK-LABEL: func.func @vector_bitcast_i16_to_i32(
-// CHECK: %[[LOAD:.*]] = xegpu.load_nd %{{.*}} {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD:.*]] = xegpu.load_nd %{{.*}} <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<8x32xi16, #xegpu.layout> -> vector<8x32xi16>
 // CHECK-NEXT: %{{.*}} = vector.bitcast %[[LOAD]] {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: vector<8x32xi16> to vector<8x16xi32>
@@ -339,9 +340,9 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<8x16xf16, #xegpu.layout>,
 // CHECK-SAME: %[[ARG1:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>,
 // CHECK-SAME: %[[ARG2:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<8x16xf32, #xegpu.layout>) {
-// CHECK: %[[T1:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK: %[[T1:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
-// CHECK-NEXT: %[[T2:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T2:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: %{{.*}} = arith.addf %[[T1]], %[[T2]] {layout_result_0 = #xegpu.layout} : vector<16x16xf16>
 func.func @binary_op_one_use(%arg0: !xegpu.tensor_desc<8x16xf16>, %arg1: !xegpu.tensor_desc<16x16xf16>, %arg2: !xegpu.tensor_desc<8x16xf32>) {
@@ -362,9 +363,9 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG2:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<8x16xf32, #xegpu.layout>,
 // CHECK-SAME: %[[ARG3:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>) {
 // CHECK: %[[T2:.*]] = arith.addf %{{.*}}, %{{.*}} {layout_result_0 = #xegpu.layout} : vector<16x16xf16>
-// CHECK: %[[T3:.*]] = xegpu.dpas %{{.*}}, %[[T2]] {layout_result_0 = #xegpu.layout} : vector<8x16xf16>, vector<16x16xf16> -> vector<8x16xf32>
-// CHECK-NEXT: xegpu.store_nd %[[T3]], %[[ARG2]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
-// CHECK-NEXT: xegpu.store_nd %[[T2]], %[[ARG3]] : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
+// CHECK: %[[T3:.*]] = xegpu.dpas %{{.*}}, %[[T2]] {layout_a = #xegpu.layout, layout_b = #xegpu.layout, layout_result_0 = #xegpu.layout} : vector<8x16xf16>, vector<16x16xf16> -> vector<8x16xf32>
+// CHECK-NEXT: xegpu.store_nd %[[T3]], %[[ARG2]] <{layout = #xegpu.layout}> : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
+// CHECK-NEXT: xegpu.store_nd %[[T2]], %[[ARG3]] <{layout = #xegpu.layout}> : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
 func.func @binary_op_multiple_uses(%arg0: !xegpu.tensor_desc<8x16xf16>, %arg1: !xegpu.tensor_desc<16x16xf16>, %arg2: !xegpu.tensor_desc<8x16xf32>, %arg3: !xegpu.tensor_desc<16x16xf16>) {
   %0 = xegpu.load_nd %arg0 : !xegpu.tensor_desc<8x16xf16> -> vector<8x16xf16>
   %1 = xegpu.load_nd %arg1 : !xegpu.tensor_desc<16x16xf16> -> vector<16x16xf16>
@@ -385,11 +386,11 @@ gpu.module @test {
 // CHECK-NEXT: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0.000000e+00> : vector<8x16xf32>
 // CHECK-NEXT: %[[T2:.*]]:3 = scf.for %{{.*}} iter_args(%[[ARG4:.*]] = %[[T0]], %[[ARG5:.*]] = %[[T1]], %[[ARG6:.*]] = %[[CST]]) ->
 // CHECK-SAME: (!xegpu.tensor_desc<8x16xf16, #xegpu.layout>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout>, vector<8x16xf32>) {
-// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG4]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG4]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16>
-// CHECK-NEXT: %[[T5:.*]] = xegpu.load_nd %[[ARG5]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T5:.*]] = xegpu.load_nd %[[ARG5]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
-// CHECK-NEXT: %[[T6:.*]] = xegpu.dpas %[[T4]], %[[T5]], %[[ARG6]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T6:.*]] = xegpu.dpas %[[T4]], %[[T5]], %[[ARG6]] {layout_a = #xegpu.layout, layout_b = #xegpu.layout, layout_cd = #xegpu.layout, layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
 // CHECK-NEXT: %[[T7:.*]] = xegpu.update_nd_offset %[[ARG4]], [{{.*}}] : !xegpu.tensor_desc<8x16xf16, #xegpu.layout>
 // CHECK-NEXT: %[[T8:.*]] = xegpu.update_nd_offset %[[ARG5]], [{{.*}}] : !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
@@ -397,7 +398,7 @@ gpu.module @test {
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>, vector<8x16xf32>
 // CHECK-NEXT: } {layout_result_2 = #xegpu.layout}
 // CHECK-NEXT: %[[T3:.*]] = xegpu.create_nd_tdesc %[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
-// CHECK-NEXT: xegpu.store_nd %[[T2]]#2, %[[T3]] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
+// CHECK-NEXT: xegpu.store_nd %[[T2]]#2, %[[T3]] <{layout = #xegpu.layout}> : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout>
 func.func @for_op(%arg0: memref<8x128xf16>, %arg1: memref<128x16xf16>, %arg2: memref<8x16xf32>) {
   %c0 = arith.constant 0 : index
   %c128 = arith.constant 128 : index
@@ -425,11 +426,11 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG1:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>,
 // CHECK-SAME: %[[ARG2:[0-9a-zA-Z]+]]: i1, %[[ARG3:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<8x16xf32, #xegpu.layout>) {
 // CHECK: %{{.*}} = scf.if %[[ARG2]] -> (vector<16x16xf16>) {
-// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: scf.yield %[[T3]] : vector<16x16xf16>
 // CHECK-NEXT: } else {
-// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: scf.yield %[[T4]] : vector<16x16xf16>
 // CHECK-NEXT: } {layout_result_0 = #xegpu.layout}
@@ -455,11 +456,11 @@ gpu.module @test {
 // CHECK-SAME: %[[ARG2:[0-9a-zA-Z]+]]: i1, %[[ARG3:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<8x16xf32, #xegpu.layout>,
 // CHECK-SAME: %[[ARG4:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>) {
 // CHECK: %[[T1:.*]] = scf.if %[[ARG2]] -> (vector<16x16xf16>) {
-// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: scf.yield %[[T3]] : vector<16x16xf16>
 // CHECK-NEXT: } else {
-// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG1]] {layout_result_0 = #xegpu.layout} :
+// CHECK-NEXT: %[[T4:.*]] = xegpu.load_nd %[[ARG1]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout} :
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: scf.yield %[[T4]] : vector<16x16xf16>
 // CHECK-NEXT: } {layout_result_0 = #xegpu.layout}
@@ -539,7 +540,7 @@ gpu.module @test {
 // CHECK-LABEL: func.func @prefetch_2d(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256x256xf16>) {
 // CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}, %{{.*}}] : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
-// CHECK-NEXT: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
+// CHECK-NEXT: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, layout = #xegpu.layout}> : !xegpu.tensor_desc<16x16xf16, #xegpu.layout>
 func.func @prefetch_2d(%arg0: memref<256x256xf16>){
   %c0 = arith.constant 0 : index
   %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>
@@ -552,7 +553,7 @@ gpu.module @test {
 // CHECK-LABEL: func.func @prefetch_1d(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
 // CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout>
-// CHECK-NEXT: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16xf16, #xegpu.layout>
+// CHECK-NEXT: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint, layout = #xegpu.layout}> : !xegpu.tensor_desc<16xf16, #xegpu.layout>
 func.func @prefetch_1d(%arg0: memref<256xf16>){
   %c0 = arith.constant 0 : index
   %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<256xf16> -> !xegpu.tensor_desc<16xf16>
@@ -599,7 +600,7 @@ gpu.module @test {
 // CHECK-LABEL: func.func @vector_shape_cast_1d_to_2d_dim1_distributed(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>,
 // CHECK-SAME: %[[ARG1:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>) {
-// CHECK: %[[LOAD:.*]] = xegpu.load_nd %[[ARG0]] {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD:.*]] = xegpu.load_nd %[[ARG0]] <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: %[[REDUCE:.*]] = vector.multi_reduction , %[[LOAD]], %{{[0-9a-zA-Z]+}}
 // CHECK-SAME: {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [0]>} [0] : vector<16x16xf16> to vector<16xf16>
@@ -621,7 +622,7 @@ gpu.module @test {
 // CHECK-LABEL: func.func @vector_shape_cast_1d_to_2d_dim0_broadcasted(
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>,
 // CHECK-SAME: %[[ARG1:[0-9a-zA-Z]+]]: !xegpu.tensor_desc<16x16xf16, #xegpu.layout>) {
-// CHECK: %[[LOAD:.*]] = xegpu.load_nd %arg0 {layout_result_0 = #xegpu.layout}
+// CHECK: %[[LOAD:.*]] = xegpu.load_nd %arg0 <{layout = #xegpu.layout}> {layout_result_0 = #xegpu.layout}
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16>
 // CHECK-NEXT: %[[REDUCE:.*]] = vector.multi_reduction , %[[LOAD]], %{{[0-9a-zA-Z]+}}
 // CHECK-SAME: {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} [1]