; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s
; Whole-register load/store path: nxv4i16 is exactly one LMUL=1 register
; (VLEN>=64), so llc selects vl1re16.v / vs1r.v with no vsetvli needed.
define void @vadd_vint16m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v8, (a1)
; CHECK-NEXT:    vl1re16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i16>, ptr %pa
  %vb = load <vscale x 4 x i16>, ptr %pb
  %vc = add <vscale x 4 x i16> %va, %vb
  store <vscale x 4 x i16> %vc, ptr %pc
  ret void
}
; LMUL=2 case: nxv8i16 occupies a 2-register group, so operands land in
; even-numbered group bases (v8, v10) and use vl2re16.v / vs2r.v.
define void @vadd_vint16m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v8, (a1)
; CHECK-NEXT:    vl2re16.v v10, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i16>, ptr %pa
  %vb = load <vscale x 8 x i16>, ptr %pb
  %vc = add <vscale x 8 x i16> %va, %vb
  store <vscale x 8 x i16> %vc, ptr %pc
  ret void
}
; LMUL=4 case: nxv16i16 occupies a 4-register group (v8, v12),
; using vl4re16.v / vs4r.v whole-register-group accesses.
define void @vadd_vint16m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v8, (a1)
; CHECK-NEXT:    vl4re16.v v12, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i16>, ptr %pa
  %vb = load <vscale x 16 x i16>, ptr %pb
  %vc = add <vscale x 16 x i16> %va, %vb
  store <vscale x 16 x i16> %vc, ptr %pc
  ret void
}
; LMUL=8 case: nxv32i16 occupies an 8-register group (v8, v16),
; using vl8re16.v / vs8r.v whole-register-group accesses.
define void @vadd_vint16m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re16.v v8, (a1)
; CHECK-NEXT:    vl8re16.v v16, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i16>, ptr %pa
  %vb = load <vscale x 32 x i16>, ptr %pb
  %vc = add <vscale x 32 x i16> %va, %vb
  store <vscale x 32 x i16> %vc, ptr %pc
  ret void
}
; Fractional LMUL=1/2 case: nxv2i16 is smaller than a full register, so a
; vsetvli (e16, mf2) is required and unit-stride vle16.v/vse16.v are used
; instead of the whole-register forms.
define void @vadd_vint16mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i16>, ptr %pa
  %vb = load <vscale x 2 x i16>, ptr %pb
  %vc = add <vscale x 2 x i16> %va, %vb
  store <vscale x 2 x i16> %vc, ptr %pc
  ret void
}
; Fractional LMUL=1/4 case: nxv1i16 uses vsetvli (e16, mf4) with
; unit-stride vle16.v/vse16.v, mirroring the mf2 test above.
define void @vadd_vint16mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i16>, ptr %pa
  %vb = load <vscale x 1 x i16>, ptr %pb
  %vc = add <vscale x 1 x i16> %va, %vb
  store <vscale x 1 x i16> %vc, ptr %pc
  ret void
}