/***************************************************************************
Copyright (c) 2025, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

#include "common.h"

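/*
 * Precision-dependent aliases for the RVV intrinsics used below: strided
 * segment loads (VLSSEG*) that gather across columns, unit-stride loads and
 * stores (VLEV/VSEV), segment stores (VSSEG*) that interleave several column
 * vectors, and tuple get/set helpers for the vfloatXXm1xK register-group
 * types.  Note that VLSEG_FLOAT is a plain strided load (__riscv_vlse*),
 * used for the single-row case.
 */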
#if !defined(DOUBLE)
#define VSETVL(n) __riscv_vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define FLOAT_VX2_T vfloat32m1x2_t
#define FLOAT_VX4_T vfloat32m1x4_t
#define FLOAT_VX8_T vfloat32m1x8_t
#define VLSEG_FLOAT __riscv_vlse32_v_f32m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1x2
#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1x4
#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1x8
#define VGET_VX2 __riscv_vget_v_f32m1x2_f32m1
#define VGET_VX4 __riscv_vget_v_f32m1x4_f32m1
#define VGET_VX8 __riscv_vget_v_f32m1x8_f32m1
#define VSET_VX2 __riscv_vset_v_f32m1_f32m1x2
#define VSET_VX4 __riscv_vset_v_f32m1_f32m1x4
#define VSET_VX8 __riscv_vset_v_f32m1_f32m1x8
#define VLEV_FLOAT __riscv_vle32_v_f32m1
#define VSEV_FLOAT __riscv_vse32_v_f32m1
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1x2
#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1x4
#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1x8
#else
#define VSETVL(n) __riscv_vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define FLOAT_VX2_T vfloat64m1x2_t
#define FLOAT_VX4_T vfloat64m1x4_t
#define FLOAT_VX8_T vfloat64m1x8_t
#define VLSEG_FLOAT __riscv_vlse64_v_f64m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1x2
#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1x4
#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1x8
#define VGET_VX2 __riscv_vget_v_f64m1x2_f64m1
#define VGET_VX4 __riscv_vget_v_f64m1x4_f64m1
#define VGET_VX8 __riscv_vget_v_f64m1x8_f64m1
#define VSET_VX2 __riscv_vset_v_f64m1_f64m1x2
#define VSET_VX4 __riscv_vset_v_f64m1_f64m1x4
#define VSET_VX8 __riscv_vset_v_f64m1_f64m1x8
#define VLEV_FLOAT __riscv_vle64_v_f64m1
#define VSEV_FLOAT __riscv_vse64_v_f64m1
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1x2
#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1x4
#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1x8
#endif

// RVV-optimized version of the implementation in ../generic/gemm_ncopy_16.c

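/*
 * Packs an m x n column-major block of A (leading dimension lda) into the
 * contiguous buffer b.  Columns are consumed in panels of 16, then 8/4/2/1
 * for the remainder of n; within each panel, every row contributes one
 * contiguous run of panel-width elements to b.
 */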
int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b)
{
    BLASLONG i, j;

    FLOAT *a_offset;
    FLOAT *a_offset1, *a_offset2, *a_offset3, *a_offset4;
    FLOAT *a_offset5, *a_offset6, *a_offset7, *a_offset8;
    FLOAT *b_offset;

    FLOAT_V_T v1, v2, v3, v4, v5, v6, v7, v8;
    FLOAT_V_T v9, v10, v11, v12, v13, v14, v15, v16;
    FLOAT_VX2_T vx2, vx21;
    FLOAT_VX4_T vx4, vx41;
    FLOAT_VX8_T vx8, vx81;

    size_t vl;

    //fprintf(stderr, "gemm_ncopy_16 m=%ld n=%ld lda=%ld\n", m, n, lda);

    a_offset = a;
    b_offset = b;

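    /*
     * Main case: panels of 16 columns.  a_offset1 points at the first 8
     * columns of the panel and a_offset2 at the next 8.  vl is set once from
     * VSETVL(8); the fixed "+ 8" element offsets in the stores below assume
     * vl == 8 (i.e. VLMAX >= 8 for the m1 types used here).
     */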
    j = (n >> 4);
    if (j) {
        vl = VSETVL(8);

        do {
            a_offset1 = a_offset;
            a_offset2 = a_offset1 + lda * 8;
            a_offset += 16 * lda;

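            /*
             * 8 rows at a time: each strided segment load gathers, for 8
             * consecutive rows, one element from each of 8 columns, so
             * v1..v8 hold rows 0..7 across columns 0..7 of the panel and
             * v9..v16 hold the same rows across columns 8..15.  The stores
             * then emit each row's 16 elements contiguously into b.
             */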
            i = m >> 3;
            if (i) {
                do {
                    vx8 = VLSSEG8_FLOAT(a_offset1, lda * sizeof(FLOAT), vl);
                    vx81 = VLSSEG8_FLOAT(a_offset2, lda * sizeof(FLOAT), vl);

                    v1 = VGET_VX8(vx8, 0);
                    v2 = VGET_VX8(vx8, 1);
                    v3 = VGET_VX8(vx8, 2);
                    v4 = VGET_VX8(vx8, 3);
                    v5 = VGET_VX8(vx8, 4);
                    v6 = VGET_VX8(vx8, 5);
                    v7 = VGET_VX8(vx8, 6);
                    v8 = VGET_VX8(vx8, 7);
                    v9 = VGET_VX8(vx81, 0);
                    v10 = VGET_VX8(vx81, 1);
                    v11 = VGET_VX8(vx81, 2);
                    v12 = VGET_VX8(vx81, 3);
                    v13 = VGET_VX8(vx81, 4);
                    v14 = VGET_VX8(vx81, 5);
                    v15 = VGET_VX8(vx81, 6);
                    v16 = VGET_VX8(vx81, 7);

                    VSEV_FLOAT(b_offset, v1, vl);
                    VSEV_FLOAT(b_offset + 8, v9, vl);
                    VSEV_FLOAT(b_offset + 16, v2, vl);
                    VSEV_FLOAT(b_offset + 24, v10, vl);
                    VSEV_FLOAT(b_offset + 32, v3, vl);
                    VSEV_FLOAT(b_offset + 40, v11, vl);
                    VSEV_FLOAT(b_offset + 48, v4, vl);
                    VSEV_FLOAT(b_offset + 56, v12, vl);
                    VSEV_FLOAT(b_offset + 64, v5, vl);
                    VSEV_FLOAT(b_offset + 72, v13, vl);
                    VSEV_FLOAT(b_offset + 80, v6, vl);
                    VSEV_FLOAT(b_offset + 88, v14, vl);
                    VSEV_FLOAT(b_offset + 96, v7, vl);
                    VSEV_FLOAT(b_offset + 104, v15, vl);
                    VSEV_FLOAT(b_offset + 112, v8, vl);
                    VSEV_FLOAT(b_offset + 120, v16, vl);

                    a_offset1 += 8;
                    a_offset2 += 8;
                    b_offset += 128;
                } while (--i);
            }

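            /* Row remainder of the 16-column panel: 4, 2 and finally 1 row,
               using progressively narrower segment loads (the 1-row case is
               a plain strided load across the 8 columns of each half-panel). */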
            if (m & 4) {
                vx4 = VLSSEG4_FLOAT(a_offset1, lda * sizeof(FLOAT), vl);
                vx41 = VLSSEG4_FLOAT(a_offset2, lda * sizeof(FLOAT), vl);

                v1 = VGET_VX4(vx4, 0);
                v2 = VGET_VX4(vx4, 1);
                v3 = VGET_VX4(vx4, 2);
                v4 = VGET_VX4(vx4, 3);
                v5 = VGET_VX4(vx41, 0);
                v6 = VGET_VX4(vx41, 1);
                v7 = VGET_VX4(vx41, 2);
                v8 = VGET_VX4(vx41, 3);

                VSEV_FLOAT(b_offset, v1, vl);
                VSEV_FLOAT(b_offset + 8, v5, vl);
                VSEV_FLOAT(b_offset + 16, v2, vl);
                VSEV_FLOAT(b_offset + 24, v6, vl);
                VSEV_FLOAT(b_offset + 32, v3, vl);
                VSEV_FLOAT(b_offset + 40, v7, vl);
                VSEV_FLOAT(b_offset + 48, v4, vl);
                VSEV_FLOAT(b_offset + 56, v8, vl);

                a_offset1 += 4;
                a_offset2 += 4;
                b_offset += 64;
            }

            if (m & 2) {
                vx2 = VLSSEG2_FLOAT(a_offset1, lda * sizeof(FLOAT), vl);
                vx21 = VLSSEG2_FLOAT(a_offset2, lda * sizeof(FLOAT), vl);

                v1 = VGET_VX2(vx2, 0);
                v2 = VGET_VX2(vx2, 1);
                v3 = VGET_VX2(vx21, 0);
                v4 = VGET_VX2(vx21, 1);

                VSEV_FLOAT(b_offset, v1, vl);
                VSEV_FLOAT(b_offset + 8, v3, vl);
                VSEV_FLOAT(b_offset + 16, v2, vl);
                VSEV_FLOAT(b_offset + 24, v4, vl);

                a_offset1 += 2;
                a_offset2 += 2;
                b_offset += 32;
            }

            if (m & 1) {
                v1 = VLSEG_FLOAT(a_offset1, lda * sizeof(FLOAT), vl);
                v2 = VLSEG_FLOAT(a_offset2, lda * sizeof(FLOAT), vl);

                VSEV_FLOAT(b_offset, v1, vl);
                VSEV_FLOAT(b_offset + 8, v2, vl);

                b_offset += 16;
            }
        } while (--j);
    }

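    /*
     * Column remainder: panels of 8, 4, 2 and 1 columns.  Here the rows are
     * vectorized instead: unit-stride loads walk down each remaining column
     * and a segment store interleaves them, so b again receives one
     * contiguous panel-width run per row.  vl is re-set from the remaining
     * row count on every pass.
     */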
    if (n & 8) {
        a_offset1 = a_offset;
        a_offset2 = a_offset1 + lda;
        a_offset3 = a_offset2 + lda;
        a_offset4 = a_offset3 + lda;
        a_offset5 = a_offset4 + lda;
        a_offset6 = a_offset5 + lda;
        a_offset7 = a_offset6 + lda;
        a_offset8 = a_offset7 + lda;
        a_offset += 8 * lda;

        for (i = m; i > 0; i -= vl) {
            vl = VSETVL(i);

            v1 = VLEV_FLOAT(a_offset1, vl);
            v2 = VLEV_FLOAT(a_offset2, vl);
            v3 = VLEV_FLOAT(a_offset3, vl);
            v4 = VLEV_FLOAT(a_offset4, vl);
            v5 = VLEV_FLOAT(a_offset5, vl);
            v6 = VLEV_FLOAT(a_offset6, vl);
            v7 = VLEV_FLOAT(a_offset7, vl);
            v8 = VLEV_FLOAT(a_offset8, vl);

            vx8 = VSET_VX8(vx8, 0, v1);
            vx8 = VSET_VX8(vx8, 1, v2);
            vx8 = VSET_VX8(vx8, 2, v3);
            vx8 = VSET_VX8(vx8, 3, v4);
            vx8 = VSET_VX8(vx8, 4, v5);
            vx8 = VSET_VX8(vx8, 5, v6);
            vx8 = VSET_VX8(vx8, 6, v7);
            vx8 = VSET_VX8(vx8, 7, v8);

            VSSEG8_FLOAT(b_offset, vx8, vl);

            a_offset1 += vl;
            a_offset2 += vl;
            a_offset3 += vl;
            a_offset4 += vl;
            a_offset5 += vl;
            a_offset6 += vl;
            a_offset7 += vl;
            a_offset8 += vl;
            b_offset += vl * 8;
        }
    }

    if (n & 4) {
        a_offset1 = a_offset;
        a_offset2 = a_offset1 + lda;
        a_offset3 = a_offset2 + lda;
        a_offset4 = a_offset3 + lda;
        a_offset += 4 * lda;

        for (i = m; i > 0; i -= vl) {
            vl = VSETVL(i);

            v1 = VLEV_FLOAT(a_offset1, vl);
            v2 = VLEV_FLOAT(a_offset2, vl);
            v3 = VLEV_FLOAT(a_offset3, vl);
            v4 = VLEV_FLOAT(a_offset4, vl);

            vx4 = VSET_VX4(vx4, 0, v1);
            vx4 = VSET_VX4(vx4, 1, v2);
            vx4 = VSET_VX4(vx4, 2, v3);
            vx4 = VSET_VX4(vx4, 3, v4);

            VSSEG4_FLOAT(b_offset, vx4, vl);

            a_offset1 += vl;
            a_offset2 += vl;
            a_offset3 += vl;
            a_offset4 += vl;
            b_offset += vl * 4;
        }
    }

    if (n & 2) {
        a_offset1 = a_offset;
        a_offset2 = a_offset1 + lda;
        a_offset += 2 * lda;

        for (i = m; i > 0; i -= vl) {
            vl = VSETVL(i);

            v1 = VLEV_FLOAT(a_offset1, vl);
            v2 = VLEV_FLOAT(a_offset2, vl);

            vx2 = VSET_VX2(vx2, 0, v1);
            vx2 = VSET_VX2(vx2, 1, v2);

            VSSEG2_FLOAT(b_offset, vx2, vl);

            a_offset1 += vl;
            a_offset2 += vl;
            b_offset += vl * 2;
        }
    }

    if (n & 1) {
        a_offset1 = a_offset;

        for (i = m; i > 0; i -= vl) {
            vl = VSETVL(i);

            v1 = VLEV_FLOAT(a_offset1, vl);

            VSEV_FLOAT(b_offset, v1, vl);

            a_offset1 += vl;
            b_offset += vl;
        }
    }

    return 0;
}
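
/*
 * For reference, a plain scalar sketch of the packing order produced above.
 * This is only an illustration of the layout (it is not the source of
 * ../generic/gemm_ncopy_16.c), it assumes the same BLASLONG/FLOAT types from
 * common.h, and the hypothetical helper name is arbitrary; it is kept out of
 * the build on purpose.
 */
#if 0
static void gemm_ncopy_16_ref(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b)
{
    BLASLONG i, j, k, cols;
    FLOAT *bp = b;

    for (j = 0; j < n; j += cols) {
        /* panel widths 16, then 8/4/2/1, exactly as in the kernel above */
        BLASLONG left = n - j;
        cols = (left >= 16) ? 16 : (left >= 8) ? 8 : (left >= 4) ? 4 : (left >= 2) ? 2 : 1;

        /* within a panel, each row i contributes one contiguous run of
           'cols' elements taken across the panel's columns */
        for (i = 0; i < m; i++)
            for (k = 0; k < cols; k++)
                *bp++ = a[i + (j + k) * lda];   /* column-major A(i, j + k) */
    }
}
#endif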