Commit cceeee7

Add optimized omatcopy_rt

1 parent 0a4546b commit cceeee7

File tree: 1 file changed (+371, −0)

kernel/x86_64/omatcopy_rt.c (371 additions, 0 deletions)
/***************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

#include "common.h"
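
/* Real out-of-place matrix copy, row-major storage, transposed output:
 * B[j*ldb + i] = alpha * A[i*lda + j] for 0 <= i < rows, 0 <= j < cols. */

/* ROWS_OF_BLOCK (the row-blocking factor used below) is not defined in this
 * diff; the guarded definition here is a fallback and its value is an
 * assumption, not taken from the original source. */
#ifndef ROWS_OF_BLOCK
#define ROWS_OF_BLOCK 384
#endif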
#ifdef HAVE_AVX

/* +r: %0 = src, %1 = dst, %2 = src_ld, %3 = dst_ld, %4 = dst_tmp */
/* m: %5 = num_rows, %6 = alpha */
/* xmm15 = alpha */
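
/* TRANS_4x4 transposes a 4x4 block of floats held in four xmm registers,
 * using the usual unpcklps/unpckhps + unpcklpd/unpckhpd shuffle sequence;
 * TRANS_4x8 does the same per 128-bit lane of four ymm registers, i.e. two
 * independent 4x4 transposes. SAVE_4x4/SAVE_4x8 store the transposed columns
 * to the destination, stepping dst_tmp (%4) by two rows of dst_ld per store
 * pair (SAVE_4x8 writes the upper lanes with vextractf128). */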
#define TRANS_4x4(a1_no,a2_no,a3_no,a4_no,t1_no,t2_no,t3_no,t4_no)\
"vunpcklps %%xmm"#a2_no",%%xmm"#a1_no",%%xmm"#t1_no"; vunpckhps %%xmm"#a2_no",%%xmm"#a1_no",%%xmm"#t2_no";"\
"vunpcklps %%xmm"#a4_no",%%xmm"#a3_no",%%xmm"#t3_no"; vunpckhps %%xmm"#a4_no",%%xmm"#a3_no",%%xmm"#t4_no";"\
"vunpcklpd %%xmm"#t3_no",%%xmm"#t1_no",%%xmm"#a1_no"; vunpckhpd %%xmm"#t3_no",%%xmm"#t1_no",%%xmm"#a2_no";"\
"vunpcklpd %%xmm"#t4_no",%%xmm"#t2_no",%%xmm"#a3_no"; vunpckhpd %%xmm"#t4_no",%%xmm"#t2_no",%%xmm"#a4_no";"

#define TRANS_4x8(a1_no,a2_no,a3_no,a4_no,t1_no,t2_no,t3_no,t4_no)\
"vunpcklps %%ymm"#a2_no",%%ymm"#a1_no",%%ymm"#t1_no"; vunpckhps %%ymm"#a2_no",%%ymm"#a1_no",%%ymm"#t2_no";"\
"vunpcklps %%ymm"#a4_no",%%ymm"#a3_no",%%ymm"#t3_no"; vunpckhps %%ymm"#a4_no",%%ymm"#a3_no",%%ymm"#t4_no";"\
"vunpcklpd %%ymm"#t3_no",%%ymm"#t1_no",%%ymm"#a1_no"; vunpckhpd %%ymm"#t3_no",%%ymm"#t1_no",%%ymm"#a2_no";"\
"vunpcklpd %%ymm"#t4_no",%%ymm"#t2_no",%%ymm"#a3_no"; vunpckhpd %%ymm"#t4_no",%%ymm"#t2_no",%%ymm"#a4_no";"

#define SAVE_4x4(b1_no,b2_no,b3_no,b4_no)\
"vmovups %%xmm"#b1_no",(%4); vmovups %%xmm"#b2_no",(%4,%3,1); leaq (%4,%3,2),%4;"\
"vmovups %%xmm"#b3_no",(%4); vmovups %%xmm"#b4_no",(%4,%3,1); leaq (%4,%3,2),%4;"

#define SAVE_4x8(b1_no,b2_no,b3_no,b4_no) SAVE_4x4(b1_no,b2_no,b3_no,b4_no)\
"vextractf128 $1,%%ymm"#b1_no",(%4); vextractf128 $1,%%ymm"#b2_no",(%4,%3,1); leaq (%4,%3,2),%4;"\
"vextractf128 $1,%%ymm"#b3_no",(%4); vextractf128 $1,%%ymm"#b4_no",(%4,%3,1); leaq (%4,%3,2),%4;"
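
/* COPY_4xN: consume 4 source rows and N source columns per invocation.
 * Each row is loaded, scaled by alpha (broadcast into ymm15/xmm15), the
 * 4xN tile is transposed, and the result is stored as N rows x 4 columns
 * of the destination. The destination pointer (%1) advances by 16 bytes
 * (4 floats) because 4 source rows become 4 destination columns. */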
#define COPY_4x16 "movq %1,%4; addq $16,%1;"\
"vmulps (%0),%%ymm15,%%ymm0; vmulps 32(%0),%%ymm15,%%ymm4; vmulps (%0,%2,1),%%ymm15,%%ymm1; vmulps 32(%0,%2,1),%%ymm15,%%ymm5; leaq (%0,%2,2),%0;"\
"vmulps (%0),%%ymm15,%%ymm2; vmulps 32(%0),%%ymm15,%%ymm6; vmulps (%0,%2,1),%%ymm15,%%ymm3; vmulps 32(%0,%2,1),%%ymm15,%%ymm7; leaq (%0,%2,2),%0;"\
TRANS_4x8(0,1,2,3,8,9,10,11) SAVE_4x8(0,1,2,3)\
TRANS_4x8(4,5,6,7,8,9,10,11) SAVE_4x8(4,5,6,7)

#define COPY_4x8 "movq %1,%4; addq $16,%1;"\
"vmulps (%0),%%ymm15,%%ymm0; vmulps (%0,%2,1),%%ymm15,%%ymm1; leaq (%0,%2,2),%0;"\
"vmulps (%0),%%ymm15,%%ymm2; vmulps (%0,%2,1),%%ymm15,%%ymm3; leaq (%0,%2,2),%0;"\
TRANS_4x8(0,1,2,3,8,9,10,11) SAVE_4x8(0,1,2,3)

#define COPY_4x4 "movq %1,%4; addq $16,%1;"\
"vmulps (%0),%%xmm15,%%xmm0; vmulps (%0,%2,1),%%xmm15,%%xmm1; leaq (%0,%2,2),%0;"\
"vmulps (%0),%%xmm15,%%xmm2; vmulps (%0,%2,1),%%xmm15,%%xmm3; leaq (%0,%2,2),%0;"\
TRANS_4x4(0,1,2,3,8,9,10,11) SAVE_4x4(0,1,2,3)

#define COPY_4x2 \
"vmovsd (%0),%%xmm0; vmovhpd (%0,%2,1),%%xmm0,%%xmm0; vmulps %%xmm15,%%xmm0,%%xmm0; leaq (%0,%2,2),%0;"\
"vmovsd (%0),%%xmm1; vmovhpd (%0,%2,1),%%xmm1,%%xmm1; vmulps %%xmm15,%%xmm1,%%xmm1; leaq (%0,%2,2),%0;"\
"vpermilps $216,%%xmm0,%%xmm0; vpermilps $216,%%xmm1,%%xmm1; vunpcklpd %%xmm1,%%xmm0,%%xmm2; vunpckhpd %%xmm1,%%xmm0,%%xmm3;"\
"vmovups %%xmm2,(%1); vmovups %%xmm3,(%1,%3,1); addq $16,%1;"

#define COPY_4x1 \
"vmovss (%0),%%xmm0; vinsertps $16,(%0,%2,1),%%xmm0,%%xmm0; leaq (%0,%2,2),%0;"\
"vinsertps $32,(%0),%%xmm0,%%xmm0; vinsertps $48,(%0,%2,1),%%xmm0,%%xmm0; leaq (%0,%2,2),%0;"\
"vmulps %%xmm15,%%xmm0,%%xmm0; vmovups %%xmm0,(%1); addq $16,%1;"
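
/* COPY_2xN and COPY_1xN handle the leftover source rows (num_rows % 4):
 * the same scale-and-transpose pattern, consuming 2 or 1 source rows and
 * advancing the destination pointer by 8 or 4 bytes respectively. */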
#define SAVE_2x4(c1_no,c2_no,t1_no,t2_no) \
"vunpcklps %%xmm"#c2_no",%%xmm"#c1_no",%%xmm"#t1_no"; vmulps %%xmm15,%%xmm"#t1_no",%%xmm"#t1_no";"\
"vmovsd %%xmm"#t1_no",(%4); vmovhpd %%xmm"#t1_no",(%4,%3,1); leaq (%4,%3,2),%4;"\
"vunpckhps %%xmm"#c2_no",%%xmm"#c1_no",%%xmm"#t2_no"; vmulps %%xmm15,%%xmm"#t2_no",%%xmm"#t2_no";"\
"vmovsd %%xmm"#t2_no",(%4); vmovhpd %%xmm"#t2_no",(%4,%3,1); leaq (%4,%3,2),%4;"

#define COPY_2x16 "movq %1,%4; addq $8,%1;"\
"vmovups (%0),%%ymm0; vmovups 32(%0),%%ymm2; vmovups (%0,%2,1),%%ymm1; vmovups 32(%0,%2,1),%%ymm3; leaq (%0,%2,2),%0;"\
"vextractf128 $1,%%ymm0,%%xmm4; vextractf128 $1,%%ymm2,%%xmm6; vextractf128 $1,%%ymm1,%%xmm5; vextractf128 $1,%%ymm3,%%xmm7;"\
SAVE_2x4(0,1,8,9) SAVE_2x4(4,5,8,9) SAVE_2x4(2,3,8,9) SAVE_2x4(6,7,8,9)

#define COPY_2x8 "movq %1,%4; addq $8,%1;"\
"vmovups (%0),%%ymm0; vmovups (%0,%2,1),%%ymm1; leaq (%0,%2,2),%0;"\
"vextractf128 $1,%%ymm0,%%xmm2; vextractf128 $1,%%ymm1,%%xmm3;"\
SAVE_2x4(0,1,4,5) SAVE_2x4(2,3,4,5)

#define COPY_2x4 "movq %1,%4; addq $8,%1;"\
"vmovups (%0),%%xmm0; vmovups (%0,%2,1),%%xmm1; leaq (%0,%2,2),%0;"\
SAVE_2x4(0,1,4,5)

#define COPY_2x2 \
"vmovsd (%0),%%xmm0; vmovhpd (%0,%2,1),%%xmm0,%%xmm0; vmulps %%xmm15,%%xmm0,%%xmm0; leaq (%0,%2,2),%0; vpermilps $216,%%xmm0,%%xmm0;"\
"vmovsd %%xmm0,(%1); vmovhpd %%xmm0,(%1,%3,1); addq $8,%1;"

#define COPY_2x1 \
"vmovss (%0),%%xmm0; vinsertps $16,(%0,%2,1),%%xmm0,%%xmm0; vmulps %%xmm15,%%xmm0,%%xmm0; leaq (%0,%2,2),%0; vmovsd %%xmm0,(%1); addq $8,%1;"

#define SAVE_1x4(c1_no)\
"vmulps %%xmm15,%%xmm"#c1_no",%%xmm"#c1_no"; vmovss %%xmm"#c1_no",(%4); vextractps $1,%%xmm"#c1_no",(%4,%3,1); leaq (%4,%3,2),%4;"\
"vextractps $2,%%xmm"#c1_no",(%4); vextractps $3,%%xmm"#c1_no",(%4,%3,1); leaq (%4,%3,2),%4;"

#define COPY_1x16 "movq %1,%4; addq $4,%1;"\
"vmovups (%0),%%xmm1;" SAVE_1x4(1) "vmovups 16(%0),%%xmm2;" SAVE_1x4(2)\
"vmovups 32(%0),%%xmm1;" SAVE_1x4(1) "vmovups 48(%0),%%xmm2;" SAVE_1x4(2) "addq %2,%0;"

#define COPY_1x8 "movq %1,%4; addq $4,%1;"\
"vmovups (%0),%%xmm1;" SAVE_1x4(1) "vmovups 16(%0),%%xmm2;" SAVE_1x4(2) "addq %2,%0;"

#define COPY_1x4 "movq %1,%4; addq $4,%1; vmovups (%0),%%xmm1;" SAVE_1x4(1) "addq %2,%0;"

#define COPY_1x2 "vmovsd (%0),%%xmm1; addq %2,%0; vmulps %%xmm15,%%xmm1,%%xmm1; vmovss %%xmm1,(%1); vextractps $1,%%xmm1,(%1,%3,1); addq $4,%1;"

#define COPY_1x1 "vmulss (%0),%%xmm15,%%xmm1; vmovss %%xmm1,(%1); addq %2,%0; addq $4,%1;"
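
/* COMPUTE(ndim): inline-asm driver for one block of ndim source columns.
 * It broadcasts alpha into ymm15, then walks num_rows (kept in r11) in
 * chunks of 4, 2 and 1 rows using the COPY_4x/2x/1x macros above; the
 * numeric labels are prefixed with ndim so each expansion gets unique
 * jump targets. */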
#define COMPUTE(ndim){\
src = src_base; dst = dst_base;\
__asm__ __volatile__(\
"vbroadcastss %6,%%ymm15; movq %5,%%r11; cmpq $4,%%r11; jb "#ndim"32f;"\
#ndim"31:\n\t"\
COPY_4x##ndim "subq $4,%%r11; cmpq $4,%%r11; jnb "#ndim"31b;"\
#ndim"32:\n\t"\
"cmpq $2,%%r11; jb "#ndim"33f;"\
COPY_2x##ndim "subq $2,%%r11;"\
#ndim"33:\n\t"\
"testq %%r11,%%r11; jz "#ndim"34f;"\
COPY_1x##ndim "subq $1,%%r11;"\
#ndim"34:\n\t"\
:"+r"(src),"+r"(dst),"+r"(src_ld_bytes),"+r"(dst_ld_bytes),"+r"(dst_tmp):"m"(num_rows),"m"(ALPHA):"r11","cc","memory"\
,"xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\
}
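
/* AVX path: alpha == 0 reduces to clearing the destination columns; otherwise
 * the source is processed in row blocks of at most ROWS_OF_BLOCK, and each
 * block in column strips of 16/8/4/2/1. The ldb%1024 test skips the 16-wide
 * strip when the destination stride is within a few elements of a multiple
 * of 4096 bytes, presumably to limit 4K-aliasing conflicts between the 16
 * output columns. */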
int CNAME(BLASLONG rows, BLASLONG cols, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *b, BLASLONG ldb){
  float *src, *dst, *dst_tmp, *src_base, *dst_base;
  uint64_t src_ld_bytes = (uint64_t)lda * sizeof(float), dst_ld_bytes = (uint64_t)ldb * sizeof(float), num_rows = 0;
  BLASLONG cols_left, rows_done; float ALPHA = alpha;
  if(ALPHA==0.0){
    dst_base = b;
    for(cols_left=cols;cols_left>0;cols_left--) {memset(dst_base,0,rows*sizeof(float)); dst_base += ldb;}
    return 0;
  }
  for(rows_done=0;rows_done<rows;rows_done+=num_rows){
    num_rows = rows-rows_done;
    if(num_rows > ROWS_OF_BLOCK) num_rows = ROWS_OF_BLOCK;
    cols_left = cols; src_base = a + (int64_t)lda * (int64_t)rows_done; dst_base = b + rows_done;
    if(ldb%1024>3 && ldb%1024<1021) for(;cols_left>15;cols_left-=16){COMPUTE(16) src_base += 16; dst_base += 16 * ldb;}
    for(;cols_left>7;cols_left-=8){COMPUTE(8) src_base += 8; dst_base += 8 * ldb;}
    for(;cols_left>3;cols_left-=4){COMPUTE(4) src_base += 4; dst_base += 4 * ldb;}
    for(;cols_left>1;cols_left-=2){COMPUTE(2) src_base += 2; dst_base += 2 * ldb;}
    if(cols_left>0){COMPUTE(1) src_base ++; dst_base += ldb;}
  }
  return 0;
}

#else

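/* Generic scalar fallback for targets without AVX: processes 4, then 2,
 * then 1 rows of the source at a time, writing each element transposed
 * and scaled by alpha into the destination. */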
int CNAME(BLASLONG rows, BLASLONG cols, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *b, BLASLONG ldb){
  BLASLONG i, j;
  FLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4;
  FLOAT *b_offset, *b_offset1, *b_offset2, *b_offset3, *b_offset4;

  if (rows <= 0) return 0;
  if (cols <= 0) return 0;
174+
175+
a_offset = a;
176+
b_offset = b;
177+
178+
i = (rows >> 2);
179+
if (i > 0) {
180+
do {
181+
a_offset1 = a_offset;
182+
a_offset2 = a_offset1 + lda;
183+
a_offset3 = a_offset2 + lda;
184+
a_offset4 = a_offset3 + lda;
185+
a_offset += 4 * lda;
186+
187+
b_offset1 = b_offset;
188+
b_offset2 = b_offset1 + ldb;
189+
b_offset3 = b_offset2 + ldb;
190+
b_offset4 = b_offset3 + ldb;
191+
b_offset += 4;
192+
193+
j = (cols >> 2);
194+
if (j > 0) {
195+
do {
196+
/* Column 1 of MAT_B */
197+
*(b_offset1 + 0) = *(a_offset1 + 0)*alpha; // Row 1 of MAT_A
198+
*(b_offset2 + 0) = *(a_offset1 + 1)*alpha;
199+
*(b_offset3 + 0) = *(a_offset1 + 2)*alpha;
200+
*(b_offset4 + 0) = *(a_offset1 + 3)*alpha;
201+
202+
/* Column 2 of MAT_B */
203+
*(b_offset1 + 1) = *(a_offset2 + 0)*alpha; // Row 2 of MAT_A
204+
*(b_offset2 + 1) = *(a_offset2 + 1)*alpha;
205+
*(b_offset3 + 1) = *(a_offset2 + 2)*alpha;
206+
*(b_offset4 + 1) = *(a_offset2 + 3)*alpha;
207+
208+
/* Column 3 of MAT_B */
209+
*(b_offset1 + 2) = *(a_offset3 + 0)*alpha; // Row 3 of MAT_A
210+
*(b_offset2 + 2) = *(a_offset3 + 1)*alpha;
211+
*(b_offset3 + 2) = *(a_offset3 + 2)*alpha;
212+
*(b_offset4 + 2) = *(a_offset3 + 3)*alpha;
213+
214+
/* Column 4 of MAT_B */
215+
*(b_offset1 + 3) = *(a_offset4 + 0)*alpha; // Row 4 of MAT_A
216+
*(b_offset2 + 3) = *(a_offset4 + 1)*alpha;
217+
*(b_offset3 + 3) = *(a_offset4 + 2)*alpha;
218+
*(b_offset4 + 3) = *(a_offset4 + 3)*alpha;
219+
220+
a_offset1 += 4;
221+
a_offset2 += 4;
222+
a_offset3 += 4;
223+
a_offset4 += 4;
224+
b_offset1 += ldb * 4;
225+
b_offset2 += ldb * 4;
226+
b_offset3 += ldb * 4;
227+
b_offset4 += ldb * 4;
228+
229+
j--;
230+
} while (j > 0);
231+
} // if(j > 0)
232+
233+
234+
if (cols & 2) {
235+
*(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
236+
*(b_offset2 + 0) = *(a_offset1 + 1)*alpha;
237+
238+
*(b_offset1 + 1) = *(a_offset2 + 0)*alpha;
239+
*(b_offset2 + 1) = *(a_offset2 + 1)*alpha;
240+
241+
*(b_offset1 + 2) = *(a_offset3 + 0)*alpha;
242+
*(b_offset2 + 2) = *(a_offset3 + 1)*alpha;
243+
244+
*(b_offset1 + 3) = *(a_offset4 + 0)*alpha;
245+
*(b_offset2 + 3) = *(a_offset4 + 1)*alpha;
246+
247+
a_offset1 += 2;
248+
a_offset2 += 2;
249+
a_offset3 += 2;
250+
a_offset4 += 2;
251+
252+
b_offset1 += ldb*2;
253+
254+
}
255+
256+
if (cols & 1) {
257+
*(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
258+
259+
*(b_offset1 + 1) = *(a_offset2 + 0)*alpha;
260+
261+
*(b_offset1 + 2) = *(a_offset3 + 0)*alpha;
262+
263+
*(b_offset1 + 3) = *(a_offset4 + 0)*alpha;
264+
}
265+
266+
i--;
267+
} while (i > 0);
268+
}
269+
270+
271+
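  /* Handle the one or two source rows left over after the blocks of four. */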
  if (rows & 2) {
    a_offset1 = a_offset;
    a_offset2 = a_offset1 + lda;
    a_offset += 2 * lda;

    b_offset1 = b_offset;
    b_offset2 = b_offset1 + ldb;
    b_offset3 = b_offset2 + ldb;
    b_offset4 = b_offset3 + ldb;
    b_offset += 2;

    j = (cols >> 2);
    if (j > 0){
      do {
        *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
        *(b_offset2 + 0) = *(a_offset1 + 1)*alpha;
        *(b_offset3 + 0) = *(a_offset1 + 2)*alpha;
        *(b_offset4 + 0) = *(a_offset1 + 3)*alpha;

        *(b_offset1 + 1) = *(a_offset2 + 0)*alpha;
        *(b_offset2 + 1) = *(a_offset2 + 1)*alpha;
        *(b_offset3 + 1) = *(a_offset2 + 2)*alpha;
        *(b_offset4 + 1) = *(a_offset2 + 3)*alpha;

        a_offset1 += 4;
        a_offset2 += 4;
        b_offset1 += ldb * 4;
        b_offset2 += ldb * 4;
        b_offset3 += ldb * 4;
        b_offset4 += ldb * 4;

        j--;
      } while (j > 0);
    }

    if (cols & 2){
      *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
      *(b_offset2 + 0) = *(a_offset1 + 1)*alpha;

      *(b_offset1 + 1) = *(a_offset2 + 0)*alpha;
      *(b_offset2 + 1) = *(a_offset2 + 1)*alpha;

      a_offset1 += 2;
      a_offset2 += 2;
      b_offset1 += ldb*2;
    }

    if (cols & 1){
      *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
      *(b_offset1 + 1) = *(a_offset2 + 0)*alpha;
    }
  } // if (rows & 2)

  if (rows & 1) {
    a_offset1 = a_offset;
    a_offset += lda;

    b_offset1 = b_offset;
    b_offset2 = b_offset1 + ldb;
    b_offset3 = b_offset2 + ldb;
    b_offset4 = b_offset3 + ldb;

    j = (cols >> 2);
    if (j > 0){
      do {
        *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
        *(b_offset2 + 0) = *(a_offset1 + 1)*alpha;
        *(b_offset3 + 0) = *(a_offset1 + 2)*alpha;
        *(b_offset4 + 0) = *(a_offset1 + 3)*alpha;

        a_offset1 += 4;
        b_offset1 += ldb * 4;
        b_offset2 += ldb * 4;
        b_offset3 += ldb * 4;
        b_offset4 += ldb * 4;

        j--;
      } while (j > 0);
    }

    if (cols & 2){
      *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
      *(b_offset2 + 0) = *(a_offset1 + 1)*alpha;

      a_offset1 += 2;
      b_offset1 += ldb * 2;
    }

    if (cols & 1){
      *(b_offset1 + 0) = *(a_offset1 + 0)*alpha;
    }
  }

  return 0;
}

#endif
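
For reference, here is a minimal standalone sketch (not part of this commit) of the operation the kernel implements, derived from the scalar fallback above: B[j*ldb + i] = alpha * A[i*lda + j]. The function and variable names are illustrative only; such a routine can serve as a correctness oracle when testing the optimized AVX path.

#include <stdio.h>

/* Reference transpose-and-scale: B (cols x rows, leading dim ldb) gets
 * alpha times the transpose of A (rows x cols, leading dim lda). */
static void omatcopy_rt_ref(long rows, long cols, float alpha,
                            const float *a, long lda, float *b, long ldb)
{
    for (long i = 0; i < rows; i++)
        for (long j = 0; j < cols; j++)
            b[j * ldb + i] = alpha * a[i * lda + j];
}

int main(void)
{
    enum { ROWS = 5, COLS = 3 };
    float a[ROWS * COLS], b[COLS * ROWS];
    for (int k = 0; k < ROWS * COLS; k++) a[k] = (float)k;

    /* lda = COLS (packed rows of A), ldb = ROWS (packed rows of B). */
    omatcopy_rt_ref(ROWS, COLS, 2.0f, a, COLS, b, ROWS);

    for (int j = 0; j < COLS; j++) {          /* print B row by row */
        for (int i = 0; i < ROWS; i++) printf("%6.1f ", b[j * ROWS + i]);
        printf("\n");
    }
    return 0;
}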
