Skip to content

Commit 02e5003

Browse files
committed
[mlir][linalg] Update pack and unpack documentation
* Clarified the `inner_dim_pos` attribute in the case of high dimensionality tensors. * Added a 5D examples to show-case the use-cases that triggered this updated. * Added a reminder for linalg.unpack that number of elements are not required to be the same between input/output due to padding being dropped. I encountered some odd variations of `linalg.pack` and `linalg.unpack` while working on some TFLite models and the definition in the documentation did not match what I saw pass in IR verification. The following changes reconcile those differences. Signed-off-by: Christopher McGirr <[email protected]>
1 parent 28f6f87 commit 02e5003

File tree

3 files changed

+114
-11
lines changed

3 files changed

+114
-11
lines changed

mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td

Lines changed: 40 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -94,11 +94,13 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [
9494
and optionally transposes the tiled source tensor dimensions.
9595

9696
`inner_dims_pos` (mandatory) specifies `k` source tensor dimensions that are
97-
being tiled, where `0 < k <= n`. The order of the dimensions matters:
98-
- The tiled dimensions (of size `inner_tiles`) are added to the end of the result
99-
tensor in the order in which they appear in `inner_dims_pos`.
97+
being tiled, where `0 < k <= n`.
10098
- `inner_dims_pos[i]` specifies the source tensor dimension tiled by
101-
`inner_tiles[i]`.
99+
`inner_tiles[i]` where `0 <= i < k`.
100+
- the resulting tiled source dimension maps to an outer dimension of the
101+
packed tensor in the order in which the non-tiled dimensions appear in the source
102+
tensor, i.e. `shape(result)[inner_dims_pos[i]]` is equal to
103+
`shape(source)[inner_dims_pos[i]] / inner_tiles[i]`.
102104

103105
`inner_tiles` (mandatory) specifies `k` tile sizes. These tile sizes
104106
correspond to the least significant ("inner") result tensor dimension sizes,
@@ -117,6 +119,16 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [
117119
into %dest : tensor<128x256xf32> -> tensor<16x8 x 8x32 xf32>
118120
// \ / \ /
119121
// outer dims inner dims
122+
// CHW to CHWhw
123+
%0 = linalg.pack %source inner_dims_pos = [2, 1] inner_tiles = [4, 2]
124+
into %dest : tensor<1x8x16xf32> -> tensor<1x2x4 x 4x2 xf32>
125+
// \ / \ /
126+
// outer dims inner dims
127+
// HCW to HCWhw
128+
%0 = linalg.pack %source inner_dims_pos = [2, 0] inner_tiles = [4, 2]
129+
into %dest : tensor<20x1x12xf32> -> tensor<10x1x3 x 4x2xf32>
130+
// \ / \ /
131+
// Outer Dims: 10x1x3 Inner Dims: 4x2
120132
```
121133

122134
`outer_dims_perm` (optional) specifies a permutation for the outer
@@ -246,12 +258,14 @@ def Linalg_UnPackOp : Linalg_RelayoutOp<"unpack"> {
246258
The "unpack" operation converts a source tensor of rank `n` with a tiled and
247259
packed layout to a result tensor of rank `n - k`.
248260

249-
`inner_dims_pos` (mandatory) specifies `k` source tensor dimensions with
250-
which the last `k` source tensor dimensions are combined, where
251-
`0 < k <= n/2`. Each `inner_dims_pos` element must be `>= 0` and `< n - k`.
252-
The order of the dimensions in `inner_dims_pos` matters: dimension
253-
`inner_dims_pos[i]` is combined with dimension `n - k + i` (assuming that
254-
`outer_dims_perm` is not specified).
261+
`inner_dims_pos` (mandatory) specifies `k` result tensor dimensions that
262+
were tiled with the `inner_tiles` to create the packed source tensor. The
263+
source tensor dimensions can be combined given `inner_dims_pos` as follows:
264+
the inner tile `shape(source)[n-k+i]` is combined with
265+
`shape(source)[inner_dims_pos[i]]` where `0 <= i < k` and stored at
266+
`shape(result)[inner_dims_pos[i]]`. The remaining dimensions are
267+
`shape(result)[j] = shape(source)[j]` where `0 <= j < n-k` and `j` is not in
268+
the set of `inner_dims_pos` indices.
255269

256270
`inner_tiles` (mandatory) specifies `k` tile sizes. These tile sizes
257271
correspond to the least significant ("inner") source tensor dimension sizes.
@@ -266,7 +280,11 @@ def Linalg_UnPackOp : Linalg_RelayoutOp<"unpack"> {
266280
dimensions. If specified, it must have `n - k` elements. If specified, this
267281
permutation is applied before combining any dimensions.
268282

269-
Example:
283+
Note: the number of elements in the source (packed tensor) and the result
284+
(unpacked) can be unequal, i.e. `SizeOf(source) >= SizeOf(result)`, as
285+
the unpack operation may drop any padding introduced by the pack operation.
286+
287+
Examples:
270288

271289
```mlir
272290
// NCnc to NC:
@@ -277,6 +295,17 @@ def Linalg_UnPackOp : Linalg_RelayoutOp<"unpack"> {
277295
%0 = linalg.unpack %source outer_dims_perm = [1, 0] inner_dims_pos = [0, 1]
278296
inner_tiles = [8, 32] into %dest
279297
: tensor<8x16x8x32xf32> -> tensor<128x256xf32>
298+
299+
// CHW to CHWhw:
300+
%0 = linalg.unpack %source inner_dims_pos = [2, 1] inner_tiles = [4, 2]
301+
into %dest : tensor<1x3x2x4x2xf32> -> tensor<1x5x7xf32>
302+
// / \
303+
// Outer Dims: 1x3x2 Inner Dims: 4x2
304+
// HCW to HCWhw
305+
%0 = linalg.unpack %source inner_dims_pos = [2, 0] inner_tiles = [4, 2]
306+
into %dest : tensor<10x1x3 x 4x2xf32> -> tensor<20x1x12xf32>
307+
// / \
308+
// Outer Dims: 10x1x3 Inner Dims: 4x2
280309
```
281310
}];
282311
let arguments = (ins AnyRankedTensor:$source,

mlir/test/Dialect/Linalg/invalid.mlir

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1824,6 +1824,17 @@ func.func @unpack_invalid_outer_dims_perm(%source: tensor<128x256xf32>, %dest: t
18241824

18251825
// -----
18261826

1827+
// Here we have the source tensor being tiled as: `source[1] / 32` and `source[0] / 16` but the inner_dims_pos does not imply
1828+
// a transpose of the outer dimensions for the result tensor. The tiled dimensions appear in the result tensor in the order
1829+
// they appear in the source tensor, i.e. 16x4x32x16
1830+
func.func @pack_invalid_result_shape(%input: tensor<256x128xf32>, %output: tensor<4x16x32x16xf32>) -> tensor<4x16x32x16xf32> {
1831+
// expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<16x4x32x16xf32>', got 'tensor<4x16x32x16xf32>'}}
1832+
%0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [32, 16] into %output : tensor<256x128xf32> -> tensor<4x16x32x16xf32>
1833+
return %0 : tensor<4x16x32x16xf32>
1834+
}
1835+
1836+
// -----
1837+
18271838
func.func @pack_invalid(%input: tensor<256x128xf32>, %output: tensor<8x8x32x16xf32>) -> tensor<8x8x32x16xf32> {
18281839
// expected-error@+1 {{the shape of output is not large enough to hold the packed data. Expected at least 'tensor<8x8x16x32xf32>', got 'tensor<8x8x32x16xf32>'}}
18291840
%0 = linalg.pack %input inner_dims_pos = [1, 0] inner_tiles = [16, 32] into %output : tensor<256x128xf32> -> tensor<8x8x32x16xf32>

mlir/test/Dialect/Linalg/named-ops.mlir

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2771,6 +2771,69 @@ func.func @pad_and_pack_partially_dynamic(%source: tensor<?x?xf32>, %dest: tenso
27712771

27722772
// -----
27732773

2774+
func.func @pack_descending_inner_dims_with_padding(%source: tensor<1x5x7xf32>, %dest: tensor<1x3x2x4x2xf32>, %pad: f32) -> tensor<1x3x2x4x2xf32> {
2775+
%0 = linalg.pack %source padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [4, 2] into %dest : tensor<1x5x7xf32> -> tensor<1x3x2x4x2xf32>
2776+
return %0 : tensor<1x3x2x4x2xf32>
2777+
}
2778+
2779+
// CHECK-LABEL: func.func @pack_descending_inner_dims_with_padding(
2780+
// CHECK-SAME: %[[SOURCE:.*]]: tensor<1x5x7xf32>,
2781+
// CHECK-SAME: %[[DEST:.*]]: tensor<1x3x2x4x2xf32>,
2782+
// CHECK-SAME: %[[PAD:.*]]: f32)
2783+
// CHECK: %{{.*}} = linalg.pack
2784+
// CHECK-SAME: inner_dims_pos = [2, 1]
2785+
// CHECK-SAME: inner_tiles = [4, 2]
2786+
// CHECK-SAME: into %[[DEST]] : tensor<1x5x7xf32> -> tensor<1x3x2x4x2xf32>
2787+
2788+
// -----
2789+
2790+
// The function suffix "with_padding" refers to the padding that was introduced by the pack operation. But here
2791+
// we are dropping the padding. Creating a tensor with less elements than what we started with.
2792+
func.func @unpack_descending_inner_dims_with_padding(%source: tensor<1x3x2x4x2xf32>, %dest: tensor<1x5x7xf32>) -> tensor<1x5x7xf32> {
2793+
%0 = linalg.unpack %source inner_dims_pos = [2, 1] inner_tiles = [4, 2] into %dest : tensor<1x3x2x4x2xf32> -> tensor<1x5x7xf32>
2794+
return %0 : tensor<1x5x7xf32>
2795+
}
2796+
2797+
// CHECK-LABEL: func.func @unpack_descending_inner_dims_with_padding(
2798+
// CHECK-SAME: %[[SOURCE:.*]]: tensor<1x3x2x4x2xf32>,
2799+
// CHECK-SAME: %[[DEST:.*]]: tensor<1x5x7xf32>)
2800+
// CHECK: %{{.*}} = linalg.unpack
2801+
// CHECK-SAME: inner_dims_pos = [2, 1]
2802+
// CHECK-SAME: inner_tiles = [4, 2]
2803+
// CHECK-SAME: into %[[DEST]] : tensor<1x3x2x4x2xf32> -> tensor<1x5x7xf32>
2804+
2805+
// -----
2806+
2807+
func.func @pack_non_adjacent_inner_dims(%source: tensor<20x1x12xf32>, %dest: tensor<10x1x3x4x2xf32>) -> tensor<10x1x3x4x2xf32> {
2808+
%0 = linalg.pack %source inner_dims_pos = [2, 0] inner_tiles = [4, 2] into %dest : tensor<20x1x12xf32> -> tensor<10x1x3x4x2xf32>
2809+
return %0 : tensor<10x1x3x4x2xf32>
2810+
}
2811+
2812+
// CHECK-LABEL: func.func @pack_non_adjacent_inner_dims(
2813+
// CHECK-SAME: %[[SOURCE:.*]]: tensor<20x1x12xf32>,
2814+
// CHECK-SAME: %[[DEST:.*]]: tensor<10x1x3x4x2xf32>)
2815+
// CHECK: %{{.*}} = linalg.pack
2816+
// CHECK-SAME: inner_dims_pos = [2, 0]
2817+
// CHECK-SAME: inner_tiles = [4, 2]
2818+
// CHECK-SAME: into %[[DEST]] : tensor<20x1x12xf32> -> tensor<10x1x3x4x2xf32>
2819+
2820+
// -----
2821+
2822+
func.func @unpack_non_adjacent_inner_dims(%source: tensor<10x1x3x4x2xf32>, %dest: tensor<20x1x12xf32>) -> tensor<20x1x12xf32> {
2823+
%0 = linalg.unpack %source inner_dims_pos = [2, 0] inner_tiles = [4, 2] into %dest : tensor<10x1x3x4x2xf32> -> tensor<20x1x12xf32>
2824+
return %0 : tensor<20x1x12xf32>
2825+
}
2826+
2827+
// CHECK-LABEL: func.func @unpack_non_adjacent_inner_dims(
2828+
// CHECK-SAME: %[[SOURCE:.*]]: tensor<10x1x3x4x2xf32>,
2829+
// CHECK-SAME: %[[DEST:.*]]: tensor<20x1x12xf32>)
2830+
// CHECK: %{{.*}} = linalg.unpack
2831+
// CHECK-SAME: inner_dims_pos = [2, 0]
2832+
// CHECK-SAME: inner_tiles = [4, 2]
2833+
// CHECK-SAME: into %[[DEST]] : tensor<10x1x3x4x2xf32> -> tensor<20x1x12xf32>
2834+
2835+
// -----
2836+
27742837
func.func @unpack_fully_dynamic(%source: tensor<?x?x?x?xf32>, %dest: tensor<?x?xf32>, %tile_n : index, %tile_m : index) -> tensor<?x?xf32> {
27752838
%0 = linalg.unpack %source inner_dims_pos = [0, 1] inner_tiles = [%tile_n, %tile_m] into %dest : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
27762839
return %0 : tensor<?x?xf32>

0 commit comments

Comments
 (0)