;;; We test VMVivl, VMVivl_v, and VMVivml_v instructions.
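;;;
;;; The three forms exercised here take the following shapes (operand roles
;;; are our reading of the declarations later in this file, not quoted from
;;; the VE manual; the parameter names are illustrative only):
;;;   vmv.vsvl  (i32 %sy, <256 x double> %vz, i32 %vl)
;;;   vmv.vsvvl (i32 %sy, <256 x double> %vz, <256 x double> %pt, i32 %vl)
;;;   vmv.vsvmvl(i32 %sy, <256 x double> %vz, <256 x i1> %vm,
;;;              <256 x double> %pt, i32 %vl)
;;; %sy is the scalar operand (register or small immediate), %pt the
;;; pass-through vector, %vm the mask, and %vl the vector length.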

; Function Attrs: nounwind
- define void @vmv_vsvl(i8* %0, i64 %1, i32 signext %2) {
+ define void @vmv_vsvl(i8* %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvl:
; CHECK: # %bb.0:
- ; CHECK-NEXT: lea %s1, 256
- ; CHECK-NEXT: lvl %s1
+ ; CHECK-NEXT: lea %s2, 256
+ ; CHECK-NEXT: lvl %s2
; CHECK-NEXT: vld %v0, 8, %s0
- ; CHECK-NEXT: vmv %v0, 31, %v0
+ ; CHECK-NEXT: and %s1, %s1, (32)0
+ ; CHECK-NEXT: vmv %v0, %s1, %v0
; CHECK-NEXT: vst %v0, 8, %s0
; CHECK-NEXT: b.l.t (, %s10)
- %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
- %5 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %4, i32 256)
- tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %1, <256 x double> %3, i32 256)
+ tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
ret void
}
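; The register-operand path above first zero-extends the i32 argument with
; `and %s1, %s1, (32)0`; (32)0 is VE assembly for a 64-bit mask of 32 zero
; bits followed by 32 one bits, i.e. 0x00000000ffffffff.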
@@ -31,20 +32,37 @@ declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)

; Function Attrs: nounwind
- define void @vmv_vsvvl(i8* %0, i32 signext %1) {
- ; CHECK-LABEL: vmv_vsvvl:
+ define void @vmv_vsvl_imm(i8* %0) {
+ ; CHECK-LABEL: vmv_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vld %v0, 8, %s0
- ; CHECK-NEXT: lea %s2, 128
- ; CHECK-NEXT: lvl %s2
; CHECK-NEXT: vmv %v0, 31, %v0
- ; CHECK-NEXT: lvl %s1
+ ; CHECK-NEXT: vst %v0, 8, %s0
+ ; CHECK-NEXT: b.l.t (, %s10)
+ %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %2, i32 256)
+ tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+ ret void
+ }
+
+ ; Function Attrs: nounwind
+ define void @vmv_vsvvl(i8* %0, i32 signext %1) {
+ ; CHECK-LABEL: vmv_vsvvl:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: lea %s2, 256
+ ; CHECK-NEXT: lvl %s2
+ ; CHECK-NEXT: vld %v0, 8, %s0
+ ; CHECK-NEXT: and %s1, %s1, (32)0
+ ; CHECK-NEXT: lea %s3, 128
+ ; CHECK-NEXT: lvl %s3
+ ; CHECK-NEXT: vmv %v0, %s1, %v0
+ ; CHECK-NEXT: lvl %s2
; CHECK-NEXT: vst %v0, 8, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
- %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %3, <256 x double> %3, i32 128)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 %1, <256 x double> %3, <256 x double> %3, i32 128)
tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
ret void
}
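; vmv.vsvvl carries its own vector length (128 here) while the surrounding
; vld/vst use 256, so the expected code toggles lvl: it switches to 128 for
; the vmv and then restores the 256-element length before the store.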
@@ -53,23 +71,62 @@ define void @vmv_vsvvl(i8* %0, i32 signext %1) {
declare <256 x double> @llvm.ve.vl.vmv.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind
- define void @vmv_vsvmvl(i8* %0, i32 signext %1) {
- ; CHECK-LABEL: vmv_vsvmvl:
+ define void @vmv_vsvvl_imm(i8* %0) {
+ ; CHECK-LABEL: vmv_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vld %v0, 8, %s0
; CHECK-NEXT: lea %s2, 128
; CHECK-NEXT: lvl %s2
- ; CHECK-NEXT: vmv %v0, 31, %v0, %vm1
+ ; CHECK-NEXT: vmv %v0, 31, %v0
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vst %v0, 8, %s0
+ ; CHECK-NEXT: b.l.t (, %s10)
+ %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %2, <256 x double> %2, i32 128)
+ tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+ ret void
+ }
+
+ ; Function Attrs: nounwind
+ define void @vmv_vsvmvl(i8* %0, i32 signext %1) {
+ ; CHECK-LABEL: vmv_vsvmvl:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: lea %s2, 256
+ ; CHECK-NEXT: lvl %s2
+ ; CHECK-NEXT: vld %v0, 8, %s0
+ ; CHECK-NEXT: and %s1, %s1, (32)0
+ ; CHECK-NEXT: lea %s3, 128
+ ; CHECK-NEXT: lvl %s3
+ ; CHECK-NEXT: vmv %v0, %s1, %v0, %vm1
+ ; CHECK-NEXT: lvl %s2
+ ; CHECK-NEXT: vst %v0, 8, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
- %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %3, <256 x i1> undef, <256 x double> %3, i32 128)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %1, <256 x double> %3, <256 x i1> undef, <256 x double> %3, i32 128)
tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
ret void
}
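; vmv.vsvmvl adds a <256 x i1> mask operand (undef in this test, surfacing
; as %vm1 in the checks) on top of the pass-through form; presumably only
; the mask-selected elements are updated.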

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+ ; Function Attrs: nounwind
+ define void @vmv_vsvmvl_imm(i8* %0) {
+ ; CHECK-LABEL: vmv_vsvmvl_imm:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: lea %s1, 256
+ ; CHECK-NEXT: lvl %s1
+ ; CHECK-NEXT: vld %v0, 8, %s0
+ ; CHECK-NEXT: lea %s2, 128
+ ; CHECK-NEXT: lvl %s2
+ ; CHECK-NEXT: vmv %v0, 31, %v0, %vm1
+ ; CHECK-NEXT: lvl %s1
+ ; CHECK-NEXT: vst %v0, 8, %s0
+ ; CHECK-NEXT: b.l.t (, %s10)
+ %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %2, <256 x i1> undef, <256 x double> %2, i32 128)
+ tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+ ret void
+ }