#include <stdint.h>
#include <float.h>
#include <math.h>
#include "atomic.h"

#define ASUINT64(x) ((union {double f; uint64_t i;}){x}).i
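/* after normalize(), e == ZEROINFNAN marks an inf/nan input and
   e > ZEROINFNAN marks zero; all finite nonzero values end up below it */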
#define ZEROINFNAN (0x7ff-0x3ff-52-1)

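/* a nonzero finite double decomposed as +-m * 2^e, with the leading one
   of the mantissa at bit 53 of m (top 10 bits and bit 0 clear);
   sign is nonzero for negative values */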
struct num { uint64_t m; int e; int sign; };

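/* illustrative example: x = 1.0 has ix = 0x3ff0000000000000, giving
   m = 1<<53, e = 0x3ff-(0x3ff+53) = -53, sign = 0, and indeed
   2^53 * 2^-53 == 1.0; subnormals are pre-scaled by 2^63 so they get
   the same m layout with a correspondingly smaller e */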
static struct num normalize(double x)
{
	uint64_t ix = ASUINT64(x);
	int e = ix>>52;
	int sign = e & 0x800;
	e &= 0x7ff;
	if (!e) {
		ix = ASUINT64(x*0x1p63);
		e = ix>>52 & 0x7ff;
		e = e ? e-63 : 0x800;
	}
	ix &= (1ull<<52)-1;
	ix |= 1ull<<52;
	ix <<= 1;
	e -= 0x3ff + 52 + 1;
	return (struct num){ix,e,sign};
}

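/* 64x64 -> 128 bit multiply via 32 bit halves: x*y == t3<<64 + t2<<32 + t1,
   and the (t1 > *lo) term detects the carry out of the low word;
   t2 itself cannot overflow because normalize() leaves the top 10 bits
   of each operand clear */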
static void mul(uint64_t *hi, uint64_t *lo, uint64_t x, uint64_t y)
{
	uint64_t t1,t2,t3;
	uint64_t xlo = (uint32_t)x, xhi = x>>32;
	uint64_t ylo = (uint32_t)y, yhi = y>>32;

	t1 = xlo*ylo;
	t2 = xlo*yhi + xhi*ylo;
	t3 = xhi*yhi;
	*lo = t1 + (t2<<32);
	*hi = t3 + (t2>>32) + (t1 > *lo);
}

double fma(double x, double y, double z)
{
	#pragma STDC FENV_ACCESS ON

	/* normalize so top 10 bits and last bit are 0 */
	struct num nx, ny, nz;
	nx = normalize(x);
	ny = normalize(y);
	nz = normalize(z);

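	/* defer to double arithmetic when x or y is zero, inf or nan, or
	   when z == 0, so that zero signs and exceptions come out right;
	   a finite nonzero product plus an inf or nan z just gives z */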
	if (nx.e >= ZEROINFNAN || ny.e >= ZEROINFNAN)
		return x*y + z;
	if (nz.e >= ZEROINFNAN) {
		if (nz.e > ZEROINFNAN) /* z==0 */
			return x*y + z;
		return z;
	}

	/* mul: r = x*y */
	uint64_t rhi, rlo, zhi, zlo;
	mul(&rhi, &rlo, nx.m, ny.m);
	/* either top 20 or 21 bits of rhi and last 2 bits of rlo are 0 */

	/* align exponents */
	int e = nx.e + ny.e;
	int d = nz.e - e;
	/* shift bits z<<=kz, r>>=kr, so kz+kr == d, set e = e+kr (== nz.e-kz) */
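	/* e.g. d == 70 takes the d >= 64 branch below: z keeps all of its
	   mantissa in zhi, then r is shifted right by the remaining 6, with
	   the shifted-out bits OR-ed into bit 0 as a sticky bit so the
	   final rounding still sees them */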
	if (d > 0) {
		if (d < 64) {
			zlo = nz.m<<d;
			zhi = nz.m>>64-d;
		} else {
			zlo = 0;
			zhi = nz.m;
			e = nz.e - 64;
			d -= 64;
			if (d == 0) {
			} else if (d < 64) {
				rlo = rhi<<64-d | rlo>>d | !!(rlo<<64-d);
				rhi = rhi>>d;
			} else {
				rlo = 1;
				rhi = 0;
			}
		}
	} else {
		zhi = 0;
		d = -d;
		if (d == 0) {
			zlo = nz.m;
		} else if (d < 64) {
			zlo = nz.m>>d | !!(nz.m<<64-d);
		} else {
			zlo = 1;
		}
	}

	/* add */
	int sign = nx.sign^ny.sign;
	int samesign = !(sign^nz.sign);
	int nonzero = 1;
	if (samesign) {
		/* r += z */
		rlo += zlo;
		rhi += zhi + (rlo < zlo);
	} else {
		/* r -= z */
		uint64_t t = rlo;
		rlo -= zlo;
		rhi = rhi - zhi - (t < rlo);
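		/* a set sign bit means the subtraction went negative: negate
		   the 128 bit value (two's complement) and flip the sign */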
		if (rhi>>63) {
			rlo = -rlo;
			rhi = -rhi-!!rlo;
			sign = !sign;
		}
		nonzero = !!rhi;
	}

	/* set rhi to top 63 bits of the result (last bit is sticky) */
	if (nonzero) {
		e += 64;
		d = a_clz_64(rhi)-1;
		/* note: d > 0 */
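		/* (both summands had their high bits clear, so rhi < 2^55 and
		   a_clz_64 — count leading zeros, from atomic.h — is >= 9) */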
		rhi = rhi<<d | rlo>>64-d | !!(rlo<<d);
	} else if (rlo) {
		d = a_clz_64(rlo)-1;
		if (d < 0)
			rhi = rlo>>1 | (rlo&1);
		else
			rhi = rlo<<d;
	} else {
		/* exact +-0 */
		return x*y + z;
	}
	e -= d;

	/* convert to double */
	int64_t i = rhi; /* i is in [1<<62,(1<<63)-1] */
	if (sign)
		i = -i;
	double r = i; /* |r| is in [0x1p62,0x1p63] */
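	/* the int64 -> double conversion above rounds once, in the current
	   rounding mode, and raises inexact if low bits are lost — exactly
	   the single rounding fma has to perform */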

	if (e < -1022-62) {
		/* result is subnormal before rounding */
		if (e == -1022-63) {
			double c = 0x1p63;
			if (sign)
				c = -c;
			if (r == c) {
				/* min normal after rounding, underflow depends
				   on arch behaviour which can be imitated by
				   a double to float conversion */
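				/* fltmin is a double just below FLT_MIN with
				   the sign of r: converting it to float rounds
				   to +-FLT_MIN, so underflow is raised exactly
				   when the arch detects tininess before
				   rounding; scaling by DBL_MIN/FLT_MIN then
				   yields the +-DBL_MIN result */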
				float fltmin = 0x0.ffffff8p-63*FLT_MIN * r;
				return DBL_MIN/FLT_MIN * fltmin;
			}
			/* one bit is lost when scaled, add another top bit to
			   only round once at conversion if it is inexact */
			if (rhi << 53) {
				i = rhi>>1 | (rhi&1) | 1ull<<62;
				if (sign)
					i = -i;
				r = i;
				r = 2*r - c; /* remove top bit */

				/* raise underflow portably, such that it
				   cannot be optimized away: tiny*tiny
				   underflows, while the (r-r) == 0 factor
				   leaves the value of r unchanged */
				{
					double_t tiny = DBL_MIN/FLT_MIN * r;
					r += (double)(tiny*tiny) * (r-r);
				}
			}
		} else {
			/* only round once when scaled */
			d = 10;
			i = ( rhi>>d | !!(rhi<<64-d) ) << d;
			if (sign)
				i = -i;
			r = i;
		}
	}
	return scalbn(r, e);
}
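
/*
 * usage sketch: a correctly rounded fma recovers the rounding error of
 * a double product exactly (barring over- and underflow):
 *
 *	double p = x*y;
 *	double err = fma(x, y, -p);	// err == x*y - p exactly
 *
 * plain double arithmetic would round that error away.
 */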