@@ -287,7 +287,7 @@ func.func @mmt4d_bf16bf16f32(%arg0 : tensor<?x?x16x2xbf16>, %arg1 : tensor<?x?x1
 func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"llvm-cpu", "xyz", {ukernels = "all", target_triple="x86_64-xyz-xyz", cpu_features="+avx512f"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }
@@ -315,7 +315,7 @@ func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %ar
 func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }
@@ -344,7 +344,7 @@ func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 :
 func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %arg2 : f16) -> tensor<?x?x7x8xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf16> -> tensor<?x?x7x8xf16>
   func.return %result : tensor<?x?x7x8xf16>
 }
@@ -373,7 +373,7 @@ func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %ar
 func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>, %arg2 : bf16) -> tensor<?x?x7x8xbf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xbf16> -> tensor<?x?x7x8xbf16>
   func.return %result : tensor<?x?x7x8xbf16>
 }
@@ -401,7 +401,7 @@ func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>,
 func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?x?x7x8xi32>, %arg2 : i32) -> tensor<?x?x7x8xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi32> -> tensor<?x?x7x8xi32>
   func.return %result : tensor<?x?x7x8xi32>
 }
@@ -430,19 +430,19 @@ func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?
 func.func @pack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?x7x8xf32>, %arg2 : f32) -> tensor<?x?x7x8xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf32> -> tensor<?x?x7x8xf32>
   func.return %result : tensor<?x?x7x8xf32>
 }
 
 // -----
 
-// Check that tensor.pack is not lowered to a microkernel by default - it should
+// Check that linalg.pack is not lowered to a microkernel by default - it should
 // only be on VMVX.
 // CHECK: func @unpack_f16f16_default
-// CHECK: tensor.unpack
+// CHECK: linalg.unpack
 func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }
@@ -468,7 +468,7 @@ func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?
 func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }
@@ -494,7 +494,7 @@ func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -
 func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : tensor<?x?xi32>) -> tensor<?x?xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xi32> -> tensor<?x?xi32>
   func.return %result : tensor<?x?xi32>
 }
@@ -520,7 +520,7 @@ func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : te
 func.func @unpack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?x7x8xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf32> -> tensor<?x?xf32>
   func.return %result : tensor<?x?xf32>
 }