Skip to content

Commit 080801d

Browse files
committed
math/aarch64/sve: more updates in exps.
Update comments and coefficient indices for consistency and correctness. No change in codegen. Fix copyright years in inline helper.
1 parent 2238b31 commit 080801d

File tree

4 files changed: +32 additions, −36 deletions

math/aarch64/sve/exp.c

Lines changed: 13 additions & 14 deletions
Original file line number · Diff line number · Diff line change
@@ -11,16 +11,15 @@
1111

1212
static const struct data
1313
{
14-
double poly_even[2];
14+
double c0, c2;
1515
double c1, c3;
1616
double ln2_hi, ln2_lo, inv_ln2, shift, thres;
1717

1818
} data = {
19-
.poly_even = { /* ulp error: 0.53. */
20-
0x1.fffffffffdbcdp-2, 0x1.555573c6a9f7dp-5,
21-
},
22-
.c1 = 0x1.555555555444cp-3,
23-
.c3 = 0x1.1111266d28935p-7,
19+
.c0 = 0x1.fffffffffdbcdp-2,
20+
.c1 = 0x1.555555555444cp-3,
21+
.c2 = 0x1.555573c6a9f7dp-5,
22+
.c3 = 0x1.1111266d28935p-7,
2423
.ln2_hi = 0x1.62e42fefa3800p-1,
2524
.ln2_lo = 0x1.ef35793c76730p-45,
2625
/* 1/ln2. */
@@ -49,13 +48,13 @@ special_case (svbool_t pg, svfloat64_t s, svfloat64_t y, svfloat64_t n)
4948
svuint64_t b
5049
= svdup_u64_z (p_sign, SpecialOffset); /* Inactive lanes set to 0. */
5150

52-
/* Set s1 to generate overflow depending on sign of exponent n. */
53-
svfloat64_t s1 = svreinterpret_f64 (
54-
svsubr_x (pg, b, SpecialBias1)); /* 0x70...0 - b. */
55-
/* Offset s to avoid overflow in final result if n is below threshold. */
51+
/* Set s1 to generate overflow depending on sign of exponent n,
52+
ie. s1 = 0x70...0 - b. */
53+
svfloat64_t s1 = svreinterpret_f64 (svsubr_x (pg, b, SpecialBias1));
54+
/* Offset s to avoid overflow in final result if n is below threshold.
55+
ie. s2 = as_u64 (s) - 0x3010...0 + b. */
5656
svfloat64_t s2 = svreinterpret_f64 (
57-
svadd_x (pg, svsub_x (pg, svreinterpret_u64 (s), SpecialBias2),
58-
b)); /* as_u64 (s) - 0x3010...0 + b. */
57+
svadd_x (pg, svsub_x (pg, svreinterpret_u64 (s), SpecialBias2), b));
5958

6059
/* |n| > 1280 => 2^(n) overflows. */
6160
svbool_t p_cmp = svacgt (pg, n, 1280.0);
@@ -104,8 +103,8 @@ svfloat64_t SV_NAME_D1 (exp) (svfloat64_t x, const svbool_t pg)
104103

105104
/* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4 + C3 r^5. */
106105
svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
107-
svfloat64_t p01 = svmla_lane (sv_f64 (d->poly_even[0]), r, c13, 0);
108-
svfloat64_t p23 = svmla_lane (sv_f64 (d->poly_even[1]), r, c13, 1);
106+
svfloat64_t p01 = svmla_lane (sv_f64 (d->c0), r, c13, 0);
107+
svfloat64_t p23 = svmla_lane (sv_f64 (d->c2), r, c13, 1);
109108
svfloat64_t p04 = svmla_x (pg, p01, p23, r2);
110109
svfloat64_t y = svmla_x (pg, r, p04, r2);
111110

math/aarch64/sve/exp10.c

Lines changed: 11 additions & 11 deletions
Original file line number · Diff line number · Diff line change
@@ -13,19 +13,19 @@
1313

1414
static const struct data
1515
{
16-
double poly_even[2];
17-
double c0, c2, c1, c3, c5;
16+
double c1, c3, c2, c4, c0;
1817
double shift, log10_2, log2_10_hi, log2_10_lo, scale_thres, special_bound;
1918
} data = {
2019
/* Coefficients generated using Remez algorithm.
2120
rel error: 0x1.9fcb9b3p-60
2221
abs error: 0x1.a20d9598p-60 in [ -log10(2)/128, log10(2)/128 ]
2322
max ulp err 0.52 +0.5. */
24-
.poly_even = { 0x1.53524c73cd32ap1, 0x1.2bd77b1361ef6p0 },
23+
.c0 = 0x1.26bb1bbb55516p1,
24+
.c1 = 0x1.53524c73cd32ap1,
25+
.c2 = 0x1.0470591daeafbp1,
26+
.c3 = 0x1.2bd77b1361ef6p0,
27+
.c4 = 0x1.142b5d54e9621p-1,
2528
/* 1.5*2^46+1023. This value is further explained below. */
26-
.c1 = 0x1.0470591daeafbp1,
27-
.c3 = 0x1.142b5d54e9621p-1,
28-
.c5 = 0x1.26bb1bbb55516p1,
2929
.shift = 0x1.800000000ffc0p+46,
3030
.log10_2 = 0x1.a934f0979a371p1, /* 1/log2(10). */
3131
.log2_10_hi = 0x1.34413509f79ffp-2, /* log2(10). */
@@ -95,14 +95,14 @@ svfloat64_t SV_NAME_D1 (exp10) (svfloat64_t x, svbool_t pg)
9595
comes at significant performance cost. */
9696
svuint64_t u = svreinterpret_u64 (z);
9797
svfloat64_t scale = svexpa (u);
98-
svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
98+
svfloat64_t c24 = svld1rq (svptrue_b64 (), &d->c2);
9999
/* Approximate exp10(r) using polynomial. */
100100
svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
101-
svfloat64_t p01 = svmla_lane (sv_f64 (d->poly_even[0]), r, c13, 0);
102-
svfloat64_t p23 = svmla_lane (sv_f64 (d->poly_even[1]), r, c13, 1);
103-
svfloat64_t p04 = svmla_x (pg, p01, p23, r2);
101+
svfloat64_t p12 = svmla_lane (sv_f64 (d->c1), r, c24, 0);
102+
svfloat64_t p34 = svmla_lane (sv_f64 (d->c3), r, c24, 1);
103+
svfloat64_t p14 = svmla_x (pg, p12, p34, r2);
104104

105-
svfloat64_t y = svmla_x (pg, svmul_x (svptrue_b64 (), r, d->c5), r2, p04);
105+
svfloat64_t y = svmla_x (pg, svmul_x (svptrue_b64 (), r, d->c0), r2, p14);
106106

107107
/* Assemble result as exp10(x) = 2^n * exp10(r). If |x| > SpecialBound
108108
multiplication may overflow, so use special case routine. */

math/aarch64/sve/exp2.c

Lines changed: 7 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -16,18 +16,15 @@
1616

1717
static const struct data
1818
{
19-
double poly_even[2];
19+
double c0, c2;
2020
double c1, c3;
2121
double shift, big_bound, uoflow_bound;
2222
} data = {
2323
/* Coefficients are computed using Remez algorithm with
2424
minimisation of the absolute error. */
25-
.poly_even = { 0x1.62e42fefa3686p-1, 0x1.c6b09b16de99ap-5,
26-
},
27-
.c1 = 0x1.ebfbdff82c241p-3,
28-
.c3 = 0x1.3b2abf5571ad8p-7,
29-
.shift = 0x1.8p52 / N,
30-
.uoflow_bound = UOFlowBound,
25+
.c0 = 0x1.62e42fefa3686p-1, .c1 = 0x1.ebfbdff82c241p-3,
26+
.c2 = 0x1.c6b09b16de99ap-5, .c3 = 0x1.3b2abf5571ad8p-7,
27+
.shift = 0x1.8p52 / N, .uoflow_bound = UOFlowBound,
3128
.big_bound = BigBound,
3229
};
3330

@@ -93,10 +90,10 @@ svfloat64_t SV_NAME_D1 (exp2) (svfloat64_t x, svbool_t pg)
9390

9491
svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
9592
/* Approximate exp2(r) using polynomial. */
96-
/* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4 + C3 r^5. */
93+
/* y = exp2(r) - 1 ~= C0 r + C1 r^2 + C2 r^3 + C3 r^4. */
9794
svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
98-
svfloat64_t p01 = svmla_lane (sv_f64 (d->poly_even[0]), r, c13, 0);
99-
svfloat64_t p23 = svmla_lane (sv_f64 (d->poly_even[1]), r, c13, 1);
95+
svfloat64_t p01 = svmla_lane (sv_f64 (d->c0), r, c13, 0);
96+
svfloat64_t p23 = svmla_lane (sv_f64 (d->c2), r, c13, 1);
10097
svfloat64_t p = svmla_x (pg, p01, p23, r2);
10198
svfloat64_t y = svmul_x (svptrue_b64 (), r, p);
10299
/* Assemble exp2(x) = exp2(r) * scale. */

math/aarch64/sve/sv_expf_inline.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -2,7 +2,7 @@
22
* SVE helper for single-precision routines which calculate exp(x) and do
33
* not need special-case handling
44
*
5-
* Copyright (c) 2023-2024, Arm Limited.
5+
* Copyright (c) 2023-2025, Arm Limited.
66
* SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
77
*/
88

0 commit comments

Comments (0)