@@ -858,8 +858,6 @@ module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 1 :
 module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 : i32} {
   // CHECK-LABEL: convert_layout_slice_mmav2_blocked_reg
   tt.func @convert_layout_slice_mmav2_blocked_reg(%arg0: tensor<1xf16, #slice>) {
-    // CHECK-NOT: st.shared
-    // CHECK-NOT: llvm.load
     %0 = triton_gpu.convert_layout %arg0 : tensor<1xf16, #slice> -> tensor<1xf16, #blocked>
     tt.return
   }
@@ -903,8 +901,6 @@ module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 :
 module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 : i32} {
   // CHECK-LABEL: convert_layout_mmav3_mmav3_2
   tt.func @convert_layout_mmav3_mmav3_2(%arg0: tensor<16x16xf16, #mma1>) {
-    // CHECK-NOT: st.shared
-    // CHECK-NOT: llvm.load
     %0 = triton_gpu.convert_layout %arg0 : tensor<16x16xf16, #mma1> -> tensor<16x16xf16, #mma0>
     tt.return
   }
@@ -919,7 +915,6 @@ module attributes {"triton_gpu.num-ctas" = 1 : i32, "triton_gpu.num-warps" = 4 :
   // CHECK-LABEL: convert_layout_mmav3_mmav3_3
   tt.func @convert_layout_mmav3_mmav3_3(%arg0: tensor<1x64xf16, #mma1>) {
     // CHECK-NOT: st.shared
-    // CHECK-NOT: llvm.load
     %0 = triton_gpu.convert_layout %arg0 : tensor<1x64xf16, #mma1> -> tensor<1x64xf16, #mma0>
     tt.return
   }