AArch64: Improve codegen for SVE logs

Reduce memory accesses by using lanewise MLA and moving constants into a
struct, and reduce the number of MOVPRFX instructions (see the illustrative
sketch below).
Update maximum ULP error for double log_sve from 1 to 2.
Speedup on Neoverse V1 for log (3%), log2 (5%), and log10 (4%).
Yat Long Poon, 2025-01-03 19:07:30 +00:00, committed by Wilco Dijkstra
parent aa6609feb2
commit 32d193a372
4 changed files with 116 additions and 49 deletions
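
Editorial note, not part of the commit: the lanewise-MLA trick the message
refers to works by fetching constants that sit in adjacent struct fields once
with LD1RQ, which replicates a 128-bit pair across the whole vector; each FMLA
then selects its constant by lane index instead of needing its own duplicated
constant register, which also tends to avoid MOVPRFX set-up instructions. A
minimal sketch, with invented struct and function names:

#include <arm_sve.h>

/* Two adjacent constants, mirroring the commit's struct-of-constants layout.  */
static const struct { double a, b; } consts = { 0x1.5p-1, 0x1.1p-2 };

svfloat64_t
lanewise_mla_sketch (svfloat64_t x, svfloat64_t acc)
{
  /* One 128-bit load replicates { a, b } into every quadword of the vector.  */
  svfloat64_t ab = svld1rq_f64 (svptrue_b64 (), &consts.a);
  /* acc + x*a, then + x*b: each FMLA reads its constant as a lane, so no
     per-constant duplicate register is needed.  */
  acc = svmla_lane_f64 (acc, x, ab, 0);
  return svmla_lane_f64 (acc, x, ab, 1);
}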

sysdeps/aarch64/fpu/log10_sve.c

@@ -23,28 +23,49 @@
 #define Min 0x0010000000000000
 #define Max 0x7ff0000000000000
 #define Thres 0x7fe0000000000000 /* Max - Min.  */
-#define Off 0x3fe6900900000000
 #define N (1 << V_LOG10_TABLE_BITS)
 
-static svfloat64_t NOINLINE
-special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
+static const struct data
 {
-  return sv_call_f64 (log10, x, y, special);
+  double c0, c2;
+  double c1, c3;
+  double invln10, log10_2;
+  double c4;
+  uint64_t off;
+} data = {
+  .c0 = -0x1.bcb7b1526e506p-3,
+  .c1 = 0x1.287a7636be1d1p-3,
+  .c2 = -0x1.bcb7b158af938p-4,
+  .c3 = 0x1.63c78734e6d07p-4,
+  .c4 = -0x1.287461742fee4p-4,
+  .invln10 = 0x1.bcb7b1526e50ep-2,
+  .log10_2 = 0x1.34413509f79ffp-2,
+  .off = 0x3fe6900900000000,
+};
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t hi, svuint64_t tmp, svfloat64_t y, svfloat64_t r2,
+              svbool_t special, const struct data *d)
+{
+  svfloat64_t x = svreinterpret_f64 (svadd_x (svptrue_b64 (), tmp, d->off));
+  return sv_call_f64 (log10, x, svmla_x (svptrue_b64 (), hi, r2, y), special);
 }
 
-/* SVE log10 algorithm.
+/* Double-precision SVE log10 routine.
    Maximum measured error is 2.46 ulps.
    SV_NAME_D1 (log10)(0x1.131956cd4b627p+0) got 0x1.fffbdf6eaa669p-6
                                             want 0x1.fffbdf6eaa667p-6.  */
 svfloat64_t SV_NAME_D1 (log10) (svfloat64_t x, const svbool_t pg)
 {
+  const struct data *d = ptr_barrier (&data);
   svuint64_t ix = svreinterpret_u64 (x);
   svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thres);
 
   /* x = 2^k z; where z is in range [Off,2*Off) and exact.
      The range is split into N subintervals.
      The ith subinterval contains z and c is near its center.  */
-  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t tmp = svsub_x (pg, ix, d->off);
   svuint64_t i = svlsr_x (pg, tmp, 51 - V_LOG10_TABLE_BITS);
   i = svand_x (pg, i, (N - 1) << 1);
   svfloat64_t k = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
@@ -62,15 +83,19 @@ svfloat64_t SV_NAME_D1 (log10) (svfloat64_t x, const svbool_t pg)
   svfloat64_t r = svmad_x (pg, invc, z, -1.0);
 
   /* hi = log(c) + k*log(2).  */
-  svfloat64_t w = svmla_x (pg, logc, r, __v_log10_data.invln10);
-  svfloat64_t hi = svmla_x (pg, w, k, __v_log10_data.log10_2);
+  svfloat64_t invln10_log10_2 = svld1rq_f64 (svptrue_b64 (), &d->invln10);
+  svfloat64_t w = svmla_lane_f64 (logc, r, invln10_log10_2, 0);
+  svfloat64_t hi = svmla_lane_f64 (w, k, invln10_log10_2, 1);
 
   /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
-  svfloat64_t r2 = svmul_x (pg, r, r);
-  svfloat64_t y = sv_pw_horner_4_f64_x (pg, r, r2, __v_log10_data.poly);
+  svfloat64_t odd_coeffs = svld1rq_f64 (svptrue_b64 (), &d->c1);
+  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
+  svfloat64_t y = svmla_lane_f64 (sv_f64 (d->c2), r, odd_coeffs, 1);
+  svfloat64_t p = svmla_lane_f64 (sv_f64 (d->c0), r, odd_coeffs, 0);
+  y = svmla_x (pg, y, r2, d->c4);
+  y = svmla_x (pg, p, r2, y);
 
   if (__glibc_unlikely (svptest_any (pg, special)))
-    return special_case (x, svmla_x (svnot_z (pg, special), hi, r2, y),
-                         special);
+    return special_case (hi, tmp, y, r2, special, d);
   return svmla_x (pg, hi, r2, y);
 }
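
Editorial note, not commit text: the new special_case signature above takes
tmp instead of x and rebuilds the input as bits(tmp + off), so x does not have
to stay live in a register across the fast path. A scalar analogue of that
reconstruction:

#include <stdint.h>
#include <string.h>

/* Rebuild x from tmp = bits(x) - off, as the new special_case does with
   svadd_x + svreinterpret_f64.  */
static double
rebuild_input (uint64_t tmp, uint64_t off)
{
  uint64_t ix = tmp + off;      /* undo the range-reduction subtraction */
  double x;
  memcpy (&x, &ix, sizeof x);   /* reinterpret the bits as a double */
  return x;
}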

sysdeps/aarch64/fpu/log2_sve.c

@@ -21,15 +21,32 @@
 #include "poly_sve_f64.h"
 
 #define N (1 << V_LOG2_TABLE_BITS)
-#define Off 0x3fe6900900000000
 #define Max (0x7ff0000000000000)
 #define Min (0x0010000000000000)
 #define Thresh (0x7fe0000000000000) /* Max - Min.  */
 
-static svfloat64_t NOINLINE
-special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+static const struct data
 {
-  return sv_call_f64 (log2, x, y, cmp);
+  double c0, c2;
+  double c1, c3;
+  double invln2, c4;
+  uint64_t off;
+} data = {
+  .c0 = -0x1.71547652b83p-1,
+  .c1 = 0x1.ec709dc340953p-2,
+  .c2 = -0x1.71547651c8f35p-2,
+  .c3 = 0x1.2777ebe12dda5p-2,
+  .c4 = -0x1.ec738d616fe26p-3,
+  .invln2 = 0x1.71547652b82fep0,
+  .off = 0x3fe6900900000000,
+};
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t w, svuint64_t tmp, svfloat64_t y, svfloat64_t r2,
+              svbool_t special, const struct data *d)
+{
+  svfloat64_t x = svreinterpret_f64 (svadd_x (svptrue_b64 (), tmp, d->off));
+  return sv_call_f64 (log2, x, svmla_x (svptrue_b64 (), w, r2, y), special);
 }
 
 /* Double-precision SVE log2 routine.
@@ -40,13 +57,15 @@ special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
    want 0x1.fffb34198d9ddp-5.  */
 svfloat64_t SV_NAME_D1 (log2) (svfloat64_t x, const svbool_t pg)
 {
+  const struct data *d = ptr_barrier (&data);
   svuint64_t ix = svreinterpret_u64 (x);
   svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thresh);
 
   /* x = 2^k z; where z is in range [Off,2*Off) and exact.
      The range is split into N subintervals.
      The ith subinterval contains z and c is near its center.  */
-  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t tmp = svsub_x (pg, ix, d->off);
   svuint64_t i = svlsr_x (pg, tmp, 51 - V_LOG2_TABLE_BITS);
   i = svand_x (pg, i, (N - 1) << 1);
   svfloat64_t k = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
@@ -59,15 +78,19 @@ svfloat64_t SV_NAME_D1 (log2) (svfloat64_t x, const svbool_t pg)
   /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k.  */
+  svfloat64_t invln2_and_c4 = svld1rq_f64 (svptrue_b64 (), &d->invln2);
   svfloat64_t r = svmad_x (pg, invc, z, -1.0);
-  svfloat64_t w = svmla_x (pg, log2c, r, __v_log2_data.invln2);
-  svfloat64_t r2 = svmul_x (pg, r, r);
-  svfloat64_t y = sv_pw_horner_4_f64_x (pg, r, r2, __v_log2_data.poly);
+  svfloat64_t w = svmla_lane_f64 (log2c, r, invln2_and_c4, 0);
   w = svadd_x (pg, k, w);
 
+  svfloat64_t odd_coeffs = svld1rq_f64 (svptrue_b64 (), &d->c1);
+  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
+  svfloat64_t y = svmla_lane_f64 (sv_f64 (d->c2), r, odd_coeffs, 1);
+  svfloat64_t p = svmla_lane_f64 (sv_f64 (d->c0), r, odd_coeffs, 0);
+  y = svmla_lane_f64 (y, r2, invln2_and_c4, 1);
+  y = svmla_x (pg, p, r2, y);
+
   if (__glibc_unlikely (svptest_any (pg, special)))
-    return special_case (x, svmla_x (svnot_z (pg, special), w, r2, y),
-                         special);
+    return special_case (w, tmp, y, r2, special, d);
   return svmla_x (pg, w, r2, y);
 }
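
Editorial note, not commit text: the code replacing sv_pw_horner_4_f64_x in
the log2 and log10 hunks evaluates the same degree-4 polynomial pairwise, with
the odd coefficients c1/c3 packed into one LD1RQ-loaded vector for the lane
FMLAs. In scalar terms the schedule is:

/* poly(r) = c0 + c1*r + r2*(c2 + c3*r + c4*r2); the caller then forms
   hi + r2*poly(r), matching the "y = r2*(A0 + ...) + hi" comment.  */
static double
pairwise_poly (double r, const double c[5])
{
  double r2 = r * r;
  double y = c[2] + c[3] * r;   /* lane 1 of odd_coeffs */
  double p = c[0] + c[1] * r;   /* lane 0 of odd_coeffs */
  y = y + c[4] * r2;            /* fold in c4 */
  return p + r2 * y;
}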

sysdeps/aarch64/fpu/log_sve.c

@@ -19,39 +19,54 @@
 #include "sv_math.h"
 
-#define P(i) sv_f64 (__v_log_data.poly[i])
 #define N (1 << V_LOG_TABLE_BITS)
-#define Off (0x3fe6900900000000)
-#define MaxTop (0x7ff)
-#define MinTop (0x001)
-#define ThreshTop (0x7fe) /* MaxTop - MinTop.  */
+#define Max (0x7ff0000000000000)
+#define Min (0x0010000000000000)
+#define Thresh (0x7fe0000000000000) /* Max - Min.  */
+
+static const struct data
+{
+  double c0, c2;
+  double c1, c3;
+  double ln2, c4;
+  uint64_t off;
+} data = {
+  .c0 = -0x1.ffffffffffff7p-2,
+  .c1 = 0x1.55555555170d4p-2,
+  .c2 = -0x1.0000000399c27p-2,
+  .c3 = 0x1.999b2e90e94cap-3,
+  .c4 = -0x1.554e550bd501ep-3,
+  .ln2 = 0x1.62e42fefa39efp-1,
+  .off = 0x3fe6900900000000,
+};
 
 static svfloat64_t NOINLINE
-special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+special_case (svfloat64_t hi, svuint64_t tmp, svfloat64_t y, svfloat64_t r2,
+              svbool_t special, const struct data *d)
 {
-  return sv_call_f64 (log, x, y, cmp);
+  svfloat64_t x = svreinterpret_f64 (svadd_x (svptrue_b64 (), tmp, d->off));
+  return sv_call_f64 (log, x, svmla_x (svptrue_b64 (), hi, r2, y), special);
 }
 
-/* SVE port of AdvSIMD log algorithm.
-   Maximum measured error is 2.17 ulp:
-   SV_NAME_D1 (log)(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2
-                                           want 0x1.ffffff1cca045p-2.  */
+/* Double-precision SVE log routine.
+   Maximum measured error is 2.64 ulp:
+   SV_NAME_D1 (log)(0x1.95e54bc91a5e2p+184) got 0x1.fffffffe88cacp+6
+                                             want 0x1.fffffffe88cafp+6.  */
 svfloat64_t SV_NAME_D1 (log) (svfloat64_t x, const svbool_t pg)
 {
+  const struct data *d = ptr_barrier (&data);
   svuint64_t ix = svreinterpret_u64 (x);
-  svuint64_t top = svlsr_x (pg, ix, 52);
-  svbool_t cmp = svcmpge (pg, svsub_x (pg, top, MinTop), sv_u64 (ThreshTop));
+  svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thresh);
 
   /* x = 2^k z; where z is in range [Off,2*Off) and exact.
      The range is split into N subintervals.
      The ith subinterval contains z and c is near its center.  */
-  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t tmp = svsub_x (pg, ix, d->off);
   /* Calculate table index = (tmp >> (52 - V_LOG_TABLE_BITS)) % N.
      The actual value of i is double this due to table layout.  */
   svuint64_t i
       = svand_x (pg, svlsr_x (pg, tmp, (51 - V_LOG_TABLE_BITS)), (N - 1) << 1);
-  svint64_t k
-      = svasr_x (pg, svreinterpret_s64 (tmp), 52); /* Arithmetic shift.  */
   svuint64_t iz = svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52));
   svfloat64_t z = svreinterpret_f64 (iz);
   /* Lookup in 2 global lists (length N).  */
@@ -59,18 +74,22 @@ svfloat64_t SV_NAME_D1 (log) (svfloat64_t x, const svbool_t pg)
   svfloat64_t logc = svld1_gather_index (pg, &__v_log_data.table[0].logc, i);
 
   /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
-  svfloat64_t r = svmad_x (pg, invc, z, -1);
-  svfloat64_t kd = svcvt_f64_x (pg, k);
+  svfloat64_t kd = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
 
   /* hi = r + log(c) + k*Ln2.  */
-  svfloat64_t hi = svmla_x (pg, svadd_x (pg, logc, r), kd, __v_log_data.ln2);
+  svfloat64_t ln2_and_c4 = svld1rq_f64 (svptrue_b64 (), &d->ln2);
+  svfloat64_t r = svmad_x (pg, invc, z, -1);
+  svfloat64_t hi = svmla_lane_f64 (logc, kd, ln2_and_c4, 0);
+  hi = svadd_x (pg, r, hi);
 
   /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.  */
-  svfloat64_t r2 = svmul_x (pg, r, r);
-  svfloat64_t y = svmla_x (pg, P (2), r, P (3));
-  svfloat64_t p = svmla_x (pg, P (0), r, P (1));
-  y = svmla_x (pg, y, r2, P (4));
+  svfloat64_t odd_coeffs = svld1rq_f64 (svptrue_b64 (), &d->c1);
+  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
+  svfloat64_t y = svmla_lane_f64 (sv_f64 (d->c2), r, odd_coeffs, 1);
+  svfloat64_t p = svmla_lane_f64 (sv_f64 (d->c0), r, odd_coeffs, 0);
+  y = svmla_lane_f64 (y, r2, ln2_and_c4, 1);
   y = svmla_x (pg, p, r2, y);
 
-  if (__glibc_unlikely (svptest_any (pg, cmp)))
-    return special_case (x, svmla_x (svnot_z (pg, cmp), hi, r2, y), cmp);
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (hi, tmp, y, r2, special, d);
   return svmla_x (pg, hi, r2, y);
 }
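
Editorial note, not commit text: all three routines share the range reduction
visible above, splitting x = 2^k * z with z in [Off, 2*Off) and indexing the
table with the top mantissa bits of tmp. A scalar sketch follows; TABLE_BITS = 7
is an assumption standing in for V_LOG_TABLE_BITS, and the SVE code keeps a
doubled index, (tmp >> (51 - TABLE_BITS)) & ((N - 1) << 1), because invc and
logc are interleaved in the table:

#include <stdint.h>
#include <string.h>

#define OFF 0x3fe6900900000000ULL
#define TABLE_BITS 7   /* stand-in for V_LOG_TABLE_BITS */

static void
range_reduce (double x, int64_t *k, double *z, uint64_t *idx)
{
  uint64_t ix, tmp, iz;
  memcpy (&ix, &x, sizeof ix);
  tmp = ix - OFF;
  *k = (int64_t) tmp >> 52;                         /* arithmetic shift */
  *idx = (tmp >> (52 - TABLE_BITS)) % (1 << TABLE_BITS);
  iz = ix - (tmp & (0xfffULL << 52));               /* clear 2^k, keep z */
  memcpy (z, &iz, sizeof iz);
}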

sysdeps/aarch64/libm-test-ulps

@@ -1494,7 +1494,7 @@ float: 2
 ldouble: 1
 
 Function: "log_sve":
-double: 1
+double: 2
 float: 3
 
 Function: "log_towardzero":