
Commit 18d7afe

Add rvv support for zsymv and activate rvv support for zhemv

Parent: 30222d0

File tree: 5 files changed (+766, -4 lines)


kernel/riscv64/KERNEL.x280

Lines changed: 13 additions & 4 deletions
@@ -225,10 +225,19 @@ SSYMV_U_KERNEL = symv_U_rvv.c
 SSYMV_L_KERNEL = symv_L_rvv.c
 DSYMV_U_KERNEL = symv_U_rvv.c
 DSYMV_L_KERNEL = symv_L_rvv.c
-CSYMV_U_KERNEL = ../generic/zsymv_k.c
-CSYMV_L_KERNEL = ../generic/zsymv_k.c
-ZSYMV_U_KERNEL = ../generic/zsymv_k.c
-ZSYMV_L_KERNEL = ../generic/zsymv_k.c
+CSYMV_U_KERNEL = zsymv_U_rvv.c
+CSYMV_L_KERNEL = zsymv_L_rvv.c
+ZSYMV_U_KERNEL = zsymv_U_rvv.c
+ZSYMV_L_KERNEL = zsymv_L_rvv.c
+
+CHEMV_L_KERNEL = zhemv_LM_rvv.c
+CHEMV_M_KERNEL = zhemv_LM_rvv.c
+CHEMV_U_KERNEL = zhemv_UV_rvv.c
+CHEMV_V_KERNEL = zhemv_UV_rvv.c
+ZHEMV_L_KERNEL = zhemv_LM_rvv.c
+ZHEMV_M_KERNEL = zhemv_LM_rvv.c
+ZHEMV_U_KERNEL = zhemv_UV_rvv.c
+ZHEMV_V_KERNEL = zhemv_UV_rvv.c
 
 ZHEMMLTCOPY_M = zhemm_ltcopy_rvv_v1.c
 ZHEMMUTCOPY_M = zhemm_utcopy_rvv_v1.c
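These entries retire the generic C fallback for complex SYMV and wire both HEMV triangles to the new RVV files (the _LM_ file covers the L and M entries, the _UV_ file the U and V entries). For orientation, here is a minimal scalar sketch of the column-oriented update a lower-triangle complex SYMV kernel performs, y := alpha*A*x + y with A complex symmetric and only its lower triangle stored. It illustrates the algorithm only and is not the contents of zsymv_L_rvv.c; the function name, double precision, column-major layout, and unit strides are assumptions:

/* Lower-triangle complex SYMV, scalar reference sketch: y := alpha*A*x + y.
 * A is complex symmetric (a(j,i) == a(i,j), no conjugation), column-major,
 * with (re, im) pairs interleaved; strides are unit for brevity. */
static void zsymv_L_ref(int m, double alpha_r, double alpha_i,
                        const double *a, int lda,
                        const double *x, double *y)
{
    for (int j = 0; j < m; j++) {
        const double *col = a + 2*j*lda;               /* column j of A */
        /* temp1 = alpha * x[j] (complex multiply) */
        double t1r = alpha_r * x[2*j]   - alpha_i * x[2*j+1];
        double t1i = alpha_r * x[2*j+1] + alpha_i * x[2*j];
        double t2r = 0.0, t2i = 0.0;    /* temp2 = sum_{i>j} a(i,j)*x[i] */
        /* Diagonal term: y[j] += temp1 * a(j,j), a full complex multiply
         * because a symmetric matrix's diagonal need not be real. */
        y[2*j]   += t1r * col[2*j]   - t1i * col[2*j+1];
        y[2*j+1] += t1r * col[2*j+1] + t1i * col[2*j];
        for (int i = j + 1; i < m; i++) {
            double ar = col[2*i], ai = col[2*i+1];
            y[2*i]   += t1r * ar - t1i * ai;          /* y[i] += temp1 * a(i,j) */
            y[2*i+1] += t1r * ai + t1i * ar;
            t2r += ar * x[2*i]   - ai * x[2*i+1];     /* temp2 += a(i,j) * x[i] */
            t2i += ar * x[2*i+1] + ai * x[2*i];
        }
        y[2*j]   += alpha_r * t2r - alpha_i * t2i;    /* y[j] += alpha * temp2 */
        y[2*j+1] += alpha_r * t2i + alpha_i * t2r;
    }
}

The zhemv_LM_rvv.c kernel below walks columns the same way; the differences are that the reflected term uses conj(a(i,j)) and that the diagonal of a Hermitian matrix is real.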

kernel/riscv64/zhemv_LM_rvv.c

Lines changed: 198 additions & 0 deletions
@@ -0,0 +1,198 @@
/***************************************************************************
Copyright (c) 2013, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

#include "common.h"
#if !defined(DOUBLE)
/* Single precision: 32-bit elements at LMUL=4, m1 scalar for reductions. */
#define VSETVL(n)       __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX      __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T       vfloat32m4_t
#define FLOAT_V_T_M1    vfloat32m1_t
#define VFMVFS_FLOAT    __riscv_vfmv_f_s_f32m1_f32
#define VLSEV_FLOAT     __riscv_vlse32_v_f32m4
#define VSSEV_FLOAT     __riscv_vsse32_v_f32m4
#define VFREDSUM_FLOAT  __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT  __riscv_vfmacc_vv_f32m4
#define VFMACCVF_FLOAT  __riscv_vfmacc_vf_f32m4
#define VFMVVF_FLOAT    __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMULVV_FLOAT   __riscv_vfmul_vv_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#else
/* Double precision: 64-bit elements at LMUL=4, m1 scalar for reductions. */
#define VSETVL(n)       __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX      __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T       vfloat64m4_t
#define FLOAT_V_T_M1    vfloat64m1_t
#define VFMVFS_FLOAT    __riscv_vfmv_f_s_f64m1_f64
#define VLSEV_FLOAT     __riscv_vlse64_v_f64m4
#define VSSEV_FLOAT     __riscv_vsse64_v_f64m4
#define VFREDSUM_FLOAT  __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT  __riscv_vfmacc_vv_f64m4
#define VFMACCVF_FLOAT  __riscv_vfmacc_vf_f64m4
#define VFMVVF_FLOAT    __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMULVV_FLOAT   __riscv_vfmul_vv_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#endif
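
/*
 * What follows computes y := alpha*A*x + y for a Hermitian matrix A with its
 * lower triangle stored, one column j per outer iteration (a data-flow
 * summary inferred from the code below, added for this write-up):
 *
 *   temp1   = alpha * x[j]
 *   y[j]   += temp1 * a(j,j)         diagonal, real part only
 *   y[i]   += temp1 * a(i,j)         i > j: the scaled column
 *   temp2  += conj(a(i,j)) * x[i]    i > j: the reflected row
 *   y[j]   += alpha * temp2
 *
 * The HEMVREV branches swap the conjugation; per the KERNEL.x280 mapping
 * above, that presumably serves the CHEMV_M/ZHEMV_M entries of this file.
 */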

int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){
        BLASLONG i, j, k;
        BLASLONG ix, iy, ia;
        BLASLONG jx, jy, ja;
        FLOAT temp_r1, temp_i1;
        FLOAT temp_r2, temp_i2;
        FLOAT *a_ptr = a;
        unsigned int gvl = 0;
        FLOAT_V_T_M1 v_res, v_z0;
        gvl = VSETVL_MAX;
        v_res = VFMVVF_FLOAT_M1(0, gvl);
        v_z0 = VFMVVF_FLOAT_M1(0, gvl);

        FLOAT_V_T va0, va1, vx0, vx1, vy0, vy1, vr0, vr1;
        BLASLONG stride_x, stride_y, stride_a, inc_xv, inc_yv, inc_av, len, lda2;

        /* Complex data is interleaved (re, im), so logical increments are
           doubled and the vector strides are expressed in bytes. */
        BLASLONG inc_x2 = incx * 2;
        BLASLONG inc_y2 = incy * 2;
        stride_x = inc_x2 * sizeof(FLOAT);
        stride_y = inc_y2 * sizeof(FLOAT);
        stride_a = 2 * sizeof(FLOAT);
        lda2 = lda * 2;

        jx = 0;
        jy = 0;
        ja = 0;
        for(j = 0; j < offset; j++){
                /* temp1 = alpha * x[j] (complex multiply) */
                temp_r1 = alpha_r * x[jx] - alpha_i * x[jx+1];
                temp_i1 = alpha_r * x[jx+1] + alpha_i * x[jx];
                temp_r2 = 0;
                temp_i2 = 0;
                /* Diagonal a(j,j) is real for a Hermitian matrix, so only
                   its real part a_ptr[ja] contributes. */
                y[jy] += temp_r1 * a_ptr[ja];
                y[jy+1] += temp_i1 * a_ptr[ja];
                ix = jx + inc_x2;
                iy = jy + inc_y2;
                ia = ja + 2;
                i = j + 1;
                len = m - i;
                if(len > 0){
                        gvl = VSETVL(len);
                        inc_xv = incx * gvl * 2;
                        inc_yv = incy * gvl * 2;
                        inc_av = gvl * 2;
                        vr0 = VFMVVF_FLOAT(0, gvl);
                        vr1 = VFMVVF_FLOAT(0, gvl);
                        for(k = 0; k < len / gvl; k++){
                                va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
                                va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
                                vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                                vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
#ifndef HEMVREV
                                /* y[i] += temp1 * a(i,j) */
                                vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
                                vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
#else
                                /* y[i] += temp1 * conj(a(i,j)) */
                                vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
                                vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
                                vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
#endif
                                VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
                                VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);

                                vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                                vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
#ifndef HEMVREV
                                /* temp2 += conj(a(i,j)) * x[i] */
                                vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl);
                                vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
                                vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl);
                                vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
#else
                                /* temp2 += a(i,j) * x[i] */
                                vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl);
                                vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
                                vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl);
                                vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
#endif
                                i += gvl;
                                ix += inc_xv;
                                iy += inc_yv;
                                ia += inc_av;
                        }
                        /* Fold the vector accumulators into scalar temp2. */
                        v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
                        temp_r2 = VFMVFS_FLOAT(v_res);
                        v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
                        temp_i2 = VFMVFS_FLOAT(v_res);
                        if(i < m){
                                /* Tail: one shorter pass over the remaining
                                   m - i elements. */
                                gvl = VSETVL(m-i);
                                va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
                                va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
                                vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                                vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
#ifndef HEMVREV
                                vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
                                vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
#else
                                vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
                                vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
                                vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
                                vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
#endif
                                VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
                                VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);

                                vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                                vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
#ifndef HEMVREV
                                vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
                                vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
                                vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
                                vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
#else
                                vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
                                vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
                                vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
                                vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
#endif

                                v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
                                temp_r2 += VFMVFS_FLOAT(v_res);
                                v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
                                temp_i2 += VFMVFS_FLOAT(v_res);
                        }
                }
                /* y[j] += alpha * temp2 */
                y[jy] += alpha_r * temp_r2 - alpha_i * temp_i2;
                y[jy+1] += alpha_r * temp_i2 + alpha_i * temp_r2;
                jx += inc_x2;
                jy += inc_y2;
                ja += 2;
                a_ptr += lda2;
        }
        return(0);
}
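
The inner loops above follow the usual RVV strip-mining pattern: take gvl = VSETVL(len) lanes, run full-width iterations that only accumulate, reduce the vector accumulator to a scalar once, then finish the remainder in a single shorter pass. A self-contained sketch of that pattern with the same intrinsics, reduced to a strided dot product (illustrative only, not part of the commit; the strided_dot name and the contiguous second operand are assumptions):

#include <riscv_vector.h>
#include <stddef.h>

/* Sum over i in [0, n) of a[i*inc_a] * b[i]; a is read with a byte stride,
 * the way the kernel reads the interleaved parts of a complex array. */
double strided_dot(size_t n, const double *a, size_t inc_a, const double *b)
{
    if (n == 0) return 0.0;            /* avoid a zero-length vsetvl loop */
    ptrdiff_t stride = (ptrdiff_t)(inc_a * sizeof(double));
    size_t i = 0;
    size_t gvl = __riscv_vsetvl_e64m4(n);
    vfloat64m1_t vzero = __riscv_vfmv_v_f_f64m1(0.0, __riscv_vsetvlmax_e64m1());
    vfloat64m4_t vacc = __riscv_vfmv_v_f_f64m4(0.0, gvl);

    /* Full-width chunks: accumulate lane-wise, defer the reduction. */
    for (; i + gvl <= n; i += gvl) {
        vfloat64m4_t va = __riscv_vlse64_v_f64m4(a + i * inc_a, stride, gvl);
        vfloat64m4_t vb = __riscv_vle64_v_f64m4(b + i, gvl);
        vacc = __riscv_vfmacc_vv_f64m4(vacc, va, vb, gvl);
    }
    /* Single cross-lane reduction of the accumulator. */
    vfloat64m1_t vs = __riscv_vfredusum_vs_f64m4_f64m1(vacc, vzero, gvl);
    double sum = __riscv_vfmv_f_s_f64m1_f64(vs);

    /* Tail: one shorter pass, mirroring the kernel's if (i < m) block. */
    if (i < n) {
        size_t tail = __riscv_vsetvl_e64m4(n - i);
        vfloat64m4_t va = __riscv_vlse64_v_f64m4(a + i * inc_a, stride, tail);
        vfloat64m4_t vb = __riscv_vle64_v_f64m4(b + i, tail);
        vfloat64m4_t vt = __riscv_vfmul_vv_f64m4(va, vb, tail);
        vs = __riscv_vfredusum_vs_f64m4_f64m1(vt, vzero, tail);
        sum += __riscv_vfmv_f_s_f64m1_f64(vs);
    }
    return sum;
}

Deferring the vfredusum until after the loop, as the kernel does for vr0/vr1, keeps the loop body free of the reduction's cross-lane latency.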
