/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_mul.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_mul.c,v 1.2 1994/11/20 20:52:44 deraadt Exp $
 */

#include <sys/cdefs.h>

/*
 * Perform an FPU multiply (return x * y).
 */

#include <sys/types.h>

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"

/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as four separate words, just like any other mantissa.
 * We use local `register' variables in the hope that this is faster
 * than memory.  We keep x->fp_mant in locals for the same reason.
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *    - the guard and round bits at the bottom are sure to be zero;
 *
 *    - often many low bits are zero (y is often from a single or double
 *	precision source);
 *
 *    - bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32 zero bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
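/*
 * Editor's illustrative sketch (not from the original sources): a tiny
 * version of the loop above, assuming 4-bit mantissas instead of
 * FP_NMANT bits.  Multiplying X = 1.100 (1.5) by Y = 1.010 (1.25):
 *
 *	bit 0:	A = 0000			(Y bit clear: shift only)
 *	bit 1:	A = 1100			(add X)
 *	bit 2:	A = 0110			(shift only)
 *	bit 3:	A = 0011 + 1100 = 1111		(add X)
 *
 * which reads as 1.111 = 1.875 = 1.5 * 1.25, exactly, with no sticky
 * bits set and no final right shift needed.
 */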
struct fpn *
__fpu_mul(fe)
	struct fpemu *fe;
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int a3, a2, a1, a0, x3, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 *    - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 *    - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *	case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 *    - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 *    - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
	ORDER(x, y);
	if (ISNAN(y))
		return (y);
	if (ISINF(y)) {
		if (ISZERO(x))
			return (__fpu_newnan(fe));
		y->fp_sign ^= x->fp_sign;
		return (y);
	}
	if (ISZERO(x)) {
		x->fp_sign ^= y->fp_sign;
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa word from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x3 = x->fp_mant[3];
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a3 = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a3, a3, x3); \
	FPU_ADDCS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a3 & 1, a3 = (a3 >> 1) | (a2 << 31), \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a3, a3 = a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1
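
/*
 * Descriptive note: a0..a3 form one logical 128-bit accumulator with a0
 * the most significant word.  SHR1 shifts the whole accumulator right
 * one bit by carrying each word's low bit into the top bit of the next
 * less significant word (e.g. a2 bit 0 becomes a3 bit 31); the bit
 * falling off the bottom of a3 is ORed into `sticky' so it still
 * influences rounding.  STEP is exactly one iteration of the pseudocode
 * loop shown in the comment above.
 */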

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the four 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
	if ((m = y->fp_mant[3]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[2]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
	m = x->fp_exp + y->fp_exp;
	if (a0 >= FP_2) {
		SHR1;
		m++;
	}
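
	/*
	 * Editor's illustrative sketch (not from the original sources):
	 * with x = 1.5 * 2^3 (12) and y = 1.5 * 2^2 (6), the fraction
	 * product is 2.25, i.e. a0 >= FP_2, so one SHR1 plus the m++
	 * above normalize the result to 1.125 * 2^6 = 72.
	 */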
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[3] = a3;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;
	return (x);
}