@@ -13,7 +13,7 @@ define <vscale x 16 x i8> @test_svadd_i8(<vscale x 16 x i8> %Zn, <vscale x 16 x
; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr_3b = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"add $0.b, $1.b, $2.b", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %2, 5767177 /* reguse:ZPR */, [[COPY2]], 6357001 /* reguse:ZPR_3b */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"add $0.b, $1.b, $2.b", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %2, 5701641 /* reguse:ZPR */, [[COPY2]], 6291465 /* reguse:ZPR_3b */, [[COPY3]]
; CHECK-NEXT: $z0 = COPY %2
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 16 x i8> asm "add $0.b, $1.b, $2.b", "=w,w,y"(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm)
@@ -29,7 +29,7 @@ define <vscale x 2 x i64> @test_svsub_i64(<vscale x 2 x i64> %Zn, <vscale x 2 x
; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr_4b = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"sub $0.d, $1.d, $2.d", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %2, 5767177 /* reguse:ZPR */, [[COPY2]], 6029321 /* reguse:ZPR_4b */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"sub $0.d, $1.d, $2.d", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %2, 5701641 /* reguse:ZPR */, [[COPY2]], 5963785 /* reguse:ZPR_4b */, [[COPY3]]
; CHECK-NEXT: $z0 = COPY %2
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 2 x i64> asm "sub $0.d, $1.d, $2.d", "=w,w,x"(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm)
@@ -45,7 +45,7 @@ define <vscale x 8 x half> @test_svfmul_f16(<vscale x 8 x half> %Zn, <vscale x 8
; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr_3b = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"fmul $0.h, $1.h, $2.h", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %2, 5767177 /* reguse:ZPR */, [[COPY2]], 6357001 /* reguse:ZPR_3b */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"fmul $0.h, $1.h, $2.h", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %2, 5701641 /* reguse:ZPR */, [[COPY2]], 6291465 /* reguse:ZPR_3b */, [[COPY3]]
; CHECK-NEXT: $z0 = COPY %2
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 8 x half> asm "fmul $0.h, $1.h, $2.h", "=w,w,y"(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
@@ -61,7 +61,7 @@ define <vscale x 4 x float> @test_svfmul_f(<vscale x 4 x float> %Zn, <vscale x 4
; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr_4b = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"fmul $0.s, $1.s, $2.s", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %2, 5767177 /* reguse:ZPR */, [[COPY2]], 6029321 /* reguse:ZPR_4b */, [[COPY3]]
+ ; CHECK-NEXT: INLINEASM &"fmul $0.s, $1.s, $2.s", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %2, 5701641 /* reguse:ZPR */, [[COPY2]], 5963785 /* reguse:ZPR_4b */, [[COPY3]]
; CHECK-NEXT: $z0 = COPY %2
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 4 x float> asm "fmul $0.s, $1.s, $2.s", "=w,w,x"(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm)
@@ -79,7 +79,7 @@ define <vscale x 8 x half> @test_svfadd_f16(<vscale x 16 x i1> %Pg, <vscale x 8
; CHECK-NEXT: [[COPY3:%[0-9]+]]:ppr_3b = COPY [[COPY2]]
; CHECK-NEXT: [[COPY4:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY5:%[0-9]+]]:zpr = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"fadd $0.h, $1/m, $2.h, $3.h", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %3, 720905 /* reguse:PPR_3b */, [[COPY3]], 5767177 /* reguse:ZPR */, [[COPY4]], 5767177 /* reguse:ZPR */, [[COPY5]]
+ ; CHECK-NEXT: INLINEASM &"fadd $0.h, $1/m, $2.h, $3.h", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %3, 655369 /* reguse:PPR_3b */, [[COPY3]], 5701641 /* reguse:ZPR */, [[COPY4]], 5701641 /* reguse:ZPR */, [[COPY5]]
; CHECK-NEXT: $z0 = COPY %3
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 8 x half> asm "fadd $0.h, $1/m, $2.h, $3.h", "=w,@3Upl,w,w"(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
@@ -95,7 +95,7 @@ define <vscale x 4 x i32> @test_incp(<vscale x 16 x i1> %Pg, <vscale x 4 x i32>
; CHECK-NEXT: [[COPY1:%[0-9]+]]:ppr = COPY $p0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:ppr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"incp $0.s, $1", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %2, 458761 /* reguse:PPR */, [[COPY2]], 2147483657 /* reguse tiedto:$0 */, [[COPY3]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"incp $0.s, $1", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %2, 393225 /* reguse:PPR */, [[COPY2]], 2147483657 /* reguse tiedto:$0 */, [[COPY3]](tied-def 3)
; CHECK-NEXT: $z0 = COPY %2
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 4 x i32> asm "incp $0.s, $1", "=w,@3Upa,0"(<vscale x 16 x i1> %Pg, <vscale x 4 x i32> %Zn)
@@ -113,7 +113,7 @@ define <vscale x 8 x half> @test_svfadd_f16_Uph_constraint(<vscale x 16 x i1> %P
; CHECK-NEXT: [[COPY3:%[0-9]+]]:ppr_p8to15 = COPY [[COPY2]]
; CHECK-NEXT: [[COPY4:%[0-9]+]]:zpr = COPY [[COPY1]]
; CHECK-NEXT: [[COPY5:%[0-9]+]]:zpr = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"fadd $0.h, $1/m, $2.h, $3.h", 0 /* attdialect */, 5767178 /* regdef:ZPR */, def %3, 786441 /* reguse:PPR_p8to15 */, [[COPY3]], 5767177 /* reguse:ZPR */, [[COPY4]], 5767177 /* reguse:ZPR */, [[COPY5]]
+ ; CHECK-NEXT: INLINEASM &"fadd $0.h, $1/m, $2.h, $3.h", 0 /* attdialect */, 5701642 /* regdef:ZPR */, def %3, 720905 /* reguse:PPR_p8to15 */, [[COPY3]], 5701641 /* reguse:ZPR */, [[COPY4]], 5701641 /* reguse:ZPR */, [[COPY5]]
; CHECK-NEXT: $z0 = COPY %3
; CHECK-NEXT: RET_ReallyLR implicit $z0
%1 = tail call <vscale x 8 x half> asm "fadd $0.h, $1/m, $2.h, $3.h", "=w,@3Uph,w,w"(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
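
The only substantive change in each hunk is the immediate flag word on the INLINEASM operands. Assuming LLVM's usual inline-asm flag layout (operand kind in the low three bits, operand count from bit 3, and the register-class ID stored plus one from bit 16), the values can be decoded with the minimal standalone sketch below; this is an illustrative decoder written for this diff, not LLVM's own API:

```cpp
// Sketch only: decode INLINEASM flag words under the assumed layout
//   bits 0-2  : kind (1 = reguse, 2 = regdef, ...)
//   bits 3-15 : number of operands covered by this flag
//   bits 16+  : register class ID + 1 (0 means "no register class")
#include <cstdint>
#include <cstdio>

int main() {
  // Old/new flag-word pairs taken from the hunks above.
  const uint32_t Flags[][2] = {
      {5767178, 5701642}, // regdef:ZPR
      {5767177, 5701641}, // reguse:ZPR
      {6357001, 6291465}, // reguse:ZPR_3b
      {6029321, 5963785}, // reguse:ZPR_4b
      {458761, 393225},   // reguse:PPR
      {720905, 655369},   // reguse:PPR_3b
      {786441, 720905},   // reguse:PPR_p8to15
  };
  for (const auto &P : Flags)
    for (uint32_t F : P) {
      uint32_t Kind = F & 0x7;
      uint32_t NumOps = (F >> 3) & 0x1FFF;
      uint32_t RCID = (F >> 16) - 1; // stored biased by one
      std::printf("%7u -> kind=%u numops=%u rc=%u\n", F, Kind, NumOps, RCID);
    }
}
```

Under that decoding, every old/new pair keeps the same kind and operand count while the register-class ID drops by one (e.g. ZPR 87 to 86, ZPR_3b 96 to 95, PPR 6 to 5), which is consistent with a mechanically regenerated test after the target's register-class enumeration shifted.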