@@ -11,6 +11,6 @@
   %a.wide = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.wide = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mult = mul nuw nsw <vscale x 8 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
+  %partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
   ret <vscale x 4 x i32> %partial.reduce
 }
@@ -24,6 +24,6 @@
   %a.wide = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.wide = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %mult = mul nuw nsw <vscale x 8 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
+  %partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
   ret <vscale x 4 x i32> %partial.reduce
 }
@@ -44,7 +44,7 @@
   %a.wide = zext <16 x i16> %a to <16 x i32>
   %b.wide = zext <16 x i16> %b to <16 x i32>
   %mult = mul nuw nsw <16 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <8 x i32> @llvm.experimental.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
+  %partial.reduce = tail call <8 x i32> @llvm.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
   store <8 x i32> %partial.reduce, ptr %accptr
   ret void
 }
@@ -65,7 +65,7 @@
   %a.wide = sext <16 x i16> %a to <16 x i32>
   %b.wide = sext <16 x i16> %b to <16 x i32>
   %mult = mul nuw nsw <16 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <8 x i32> @llvm.experimental.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
+  %partial.reduce = tail call <8 x i32> @llvm.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
   store <8 x i32> %partial.reduce, ptr %accptr
   ret void
 }
@@ -83,6 +83,6 @@
   %a.wide = zext <8 x i16> %a to <8 x i32>
   %b.wide = zext <8 x i16> %b to <8 x i32>
   %mult = mul nuw nsw <8 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <4 x i32> @llvm.experimental.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
+  %partial.reduce = tail call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
   ret <4 x i32> %partial.reduce
 }
@@ -100,6 +100,6 @@ entry:
   %a.wide = sext <8 x i16> %a to <8 x i32>
   %b.wide = sext <8 x i16> %b to <8 x i32>
   %mult = mul nuw nsw <8 x i32> %a.wide, %b.wide
-  %partial.reduce = tail call <4 x i32> @llvm.experimental.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
+  %partial.reduce = tail call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
   ret <4 x i32> %partial.reduce
 }
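
For reference, the intrinsic being renamed here takes an accumulator and a wider input vector and reduces the input onto the accumulator; the lane-to-lane mapping is left unspecified, so any lowering that preserves the total sum is legal. Below is a minimal sketch (not part of this patch; the function name is hypothetical) of one valid expansion for the fixed-width <8 x i32> to <4 x i32> case used in the tests above.

; Sketch only: one legal expansion of
;   %r = call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %in)
; Split the wide input into two <4 x i32> halves and add both onto the
; accumulator, preserving the total sum.
define <4 x i32> @partial_reduce_add_sketch(<4 x i32> %acc, <8 x i32> %in) {
entry:
  %lo = shufflevector <8 x i32> %in, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x i32> %in, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %halves = add <4 x i32> %lo, %hi
  %r = add <4 x i32> %halves, %acc
  ret <4 x i32> %r
}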