/*
 * Double-precision e^x function.
 *
 * Copyright (c) 2018, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#include <math.h>
#include <stdint.h>
#include "libm.h"
#include "exp_data.h"

#define N (1 << EXP_TABLE_BITS)
#define InvLn2N __exp_data.invln2N
#define NegLn2hiN __exp_data.negln2hiN
#define NegLn2loN __exp_data.negln2loN
#define Shift __exp_data.shift
#define T __exp_data.tab
#define C2 __exp_data.poly[5 - EXP_POLY_ORDER]
#define C3 __exp_data.poly[6 - EXP_POLY_ORDER]
#define C4 __exp_data.poly[7 - EXP_POLY_ORDER]
#define C5 __exp_data.poly[8 - EXP_POLY_ORDER]

/* Handle cases that may overflow or underflow when computing the result that
   is scale*(1+TMP) without intermediate rounding. The bit representation of
   scale is in SBITS, however it has a computed exponent that may have
   overflown into the sign bit so that needs to be adjusted before using it as
   a double. (int32_t)KI is the k used in the argument reduction and exponent
   adjustment of scale, positive k here means the result may overflow and
   negative k means the result may underflow. */
static inline double specialcase(double_t tmp, uint64_t sbits, uint64_t ki)
{
	double_t scale, y;

	if ((ki & 0x80000000) == 0) {
		/* k > 0, the exponent of scale might have overflowed by <= 460. */
		sbits -= 1009ull << 52;
		scale = asdouble(sbits);
		y = 0x1p1009 * (scale + scale * tmp);
		return eval_as_double(y);
	}
	/* k < 0, need special care in the subnormal range. */
	sbits += 1022ull << 52;
	scale = asdouble(sbits);
	y = scale + scale * tmp;
	if (y < 1.0) {
		/* Round y to the right precision before scaling it into the subnormal
		   range to avoid double rounding that can cause 0.5+E/2 ulp error where
		   E is the worst-case ulp error outside the subnormal range. So this
		   is only useful if the goal is better than 1 ulp worst-case error. */
		double_t hi, lo;
		lo = scale - y + scale * tmp;
		hi = 1.0 + y;
		lo = 1.0 - hi + y + lo;
		y = eval_as_double(hi + lo) - 1.0;
		/* Avoid -0.0 with downward rounding. */
		if (WANT_ROUNDING && y == 0.0)
			y = 0.0;
		/* The underflow exception needs to be signaled explicitly. */
		fp_force_eval(fp_barrier(0x1p-1022) * 0x1p-1022);
	}
	y = 0x1p-1022 * y;
	return eval_as_double(y);
}

/* Top 12 bits of a double (sign and exponent bits). */
static inline uint32_t top12(double x)
{
	return asuint64(x) >> 52;
}

double exp(double x)
{
	uint32_t abstop;
	uint64_t ki, idx, top, sbits;
	double_t kd, z, r, r2, scale, tail, tmp;

	abstop = top12(x) & 0x7ff;
	if (predict_false(abstop - top12(0x1p-54) >= top12(512.0) - top12(0x1p-54))) {
		if (abstop - top12(0x1p-54) >= 0x80000000)
			/* Avoid spurious underflow for tiny x. */
			/* Note: 0 is common input. */
			return WANT_ROUNDING ? 1.0 + x : 1.0;
		if (abstop >= top12(1024.0)) {
			if (asuint64(x) == asuint64(-INFINITY))
				return 0.0;
			if (abstop >= top12(INFINITY))
				return 1.0 + x;
			if (asuint64(x) >> 63)
				return __math_uflow(0);
			else
				return __math_oflow(0);
		}
		/* Large x is special cased below. */
		abstop = 0;
	}

	/* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
	/* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N]. */
	z = InvLn2N * x;
#if TOINT_INTRINSICS
	kd = roundtoint(z);
	ki = converttoint(z);
#elif EXP_USE_TOINT_NARROW
	/* z - kd is in [-0.5-2^-16, 0.5] in all rounding modes. */
	kd = eval_as_double(z + Shift);
	ki = asuint64(kd) >> 16;
	kd = (double_t)(int32_t)ki;
#else
	/* z - kd is in [-1, 1] in non-nearest rounding modes. */
	kd = eval_as_double(z + Shift);
	ki = asuint64(kd);
	kd -= Shift;
#endif
	r = x + kd * NegLn2hiN + kd * NegLn2loN;
	/* 2^(k/N) ~= scale * (1 + tail). */
	idx = 2 * (ki % N);
	top = ki << (52 - EXP_TABLE_BITS);
	tail = asdouble(T[idx]);
	/* This is only a valid scale when -1023*N < k < 1024*N. */
	sbits = T[idx + 1] + top;
	/* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (tail + exp(r) - 1). */
	/* Evaluation is optimized assuming superscalar pipelined execution. */
	r2 = r * r;
	/* Without fma the worst case error is 0.25/N ulp larger. */
	/* Worst case error is less than 0.5+1.11/N+(abs poly error * 2^53) ulp. */
	tmp = tail + r + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);
	if (predict_false(abstop == 0))
		return specialcase(tmp, sbits, ki);
	scale = asdouble(sbits);
	/* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
	   is no spurious underflow here even without fma. */
	return eval_as_double(scale + scale * tmp);
}
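
/* Illustrative sketch, not part of the library: a small driver, compiled only
   when the hypothetical macro EXP_REDUCTION_DEMO is defined, that demonstrates
   the reduction used above with plain math.h calls instead of the precomputed
   table and polynomial:
     exp(x) = 2^(k/N) * exp(r),  k = round(x*N/ln2),  r = x - k*ln2/N,
   with r in [-ln2/2N, ln2/2N]. In the real code 2^(k/N) is supplied by the
   table as scale*(1+tail) and exp(r)-1 by the polynomial in tmp. */
#ifdef EXP_REDUCTION_DEMO
#include <stdio.h>

int main(void)
{
	/* Nearest double to ln 2. */
	static const double ln2 = 0x1.62e42fefa39efp-1;
	double x = 3.7;
	/* Argument reduction: nearest integer multiple of ln2/N plus a small
	   remainder r. */
	double k = round(x * N / ln2);
	double r = x - k * ln2 / N;
	/* Reconstruction from the two factors. */
	double y = exp2(k / N) * exp(r);
	printf("x = %.17g\n", x);
	printf("k = %.0f  r = %.17g  (|r| <= ln2/2N = %.3g)\n", k, r, ln2 / (2 * N));
	printf("2^(k/N)*exp(r) = %.17g\n", y);
	printf("exp(x)         = %.17g\n", exp(x));
	return 0;
}
#endif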