/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_implode.c,v 1.8 2001/08/26 05:44:46 eeh Exp $
 */

#include <sys/cdefs.h>

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/param.h>
#include <stdint.h>

#ifdef FPU_DEBUG
#include <stdio.h>
#endif

#include "fsr.h"
#include "ieee.h"
#include "instr.h"

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"

static int fpround(struct fpemu *, struct fpn *);
static int toinf(struct fpemu *, int);

#ifdef _KERNEL_MODE
extern void panic(const char *, ...);
#else
#include <OS.h>
#endif

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
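/*
 * Worked example, for illustration only (FP_NG == 2, so `gr' holds the
 * guard bit in bit 1 and the round bit in bit 0; after the shift, the
 * low bit of m3 is the new least significant mantissa bit):
 *	guard clear			-> always round down (truncate);
 *	guard set, round or sticky set	-> always round up (in RN mode);
 *	guard set, round and sticky
 *	both clear (an exact tie)	-> round up iff the new low bit
 *					   is odd, i.e. round to even.
 */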
static int
fpround(struct fpemu *fe, struct fpn *fp)
{
	uint32_t m0, m1, m2, m3;
	int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FSR_NX;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch (FSR_GET_RD(fe->fe_fsr)) {
	case FSR_RD_N:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_Z:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_NINF:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_PINF:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch (FSR_GET_RD(fe->fe_fsr)) {
	default:
	case FSR_RD_N:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_Z:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_PINF:	/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_NINF:	/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
uint32_t
__fpu_ftoi(struct fpemu *fe, struct fpn *fp)
{
	uint32_t i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {
	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32, overflow.  Otherwise shift value
		 * right into last mantissa word (this will not exceed
		 * 0xffffffff), shifting any guard and round bits out into
		 * the sticky bit.  Then ``round'' towards zero, i.e., just
		 * set an inexact exception if sticky is set (see fpround()).
		 * If the result is > 0x80000000, or is positive and equals
		 * 0x80000000, overflow; otherwise the last fraction word
		 * is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (__fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = fp->fp_mant[3];
		if (i >= ((uint32_t)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	return (0x7fffffff + sign);
}
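/*
 * Worked example, for illustration only: imploding -1.75 (fp_sign 1,
 * fp_exp 0, mantissa 1.11) shifts the two fraction bits out into the
 * sticky bit, so FSR_NX is raised and i ends up as 1; the return value
 * is -1, i.e. truncation towards zero.  The asymmetric overflow test
 * lets -2^31 through (i == 0x80000000 with sign set) but makes +2^31
 * overflow to 0x7fffffff with FSR_NV.  __fpu_ftox() below behaves the
 * same way at 64 bits.
 */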
/*
 * fpn -> extended int (high bits of int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
uint32_t
__fpu_ftox(struct fpemu *fe, struct fpn *fp, uint32_t *res)
{
	uint64_t i;
	int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {
	case FPC_ZERO:
		i = 0;
		goto done;

	case FPC_NUM:
		/*
		 * If the value is >= 2^64, overflow.  Otherwise shift
		 * value right into last mantissa word (this will not
		 * exceed 0xffffffffffffffff), shifting any guard and
		 * round bits out into the sticky bit.  Then ``round''
		 * towards zero, i.e., just set an inexact exception if
		 * sticky is set (see fpround()).
		 * If the result is > 0x8000000000000000, or is positive
		 * and equals 0x8000000000000000, overflow; otherwise
		 * the last two fraction words are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (__fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = ((uint64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
		if (i >= ((uint64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		goto done;

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	i = 0x7fffffffffffffffLL + sign;
done:
	res[1] = i & 0xffffffff;
	return (i >> 32);
}

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
uint32_t
__fpu_ftos(struct fpemu *fe, struct fpn *fp)
{
	uint32_t sign = fp->fp_sign << 31;
	int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) __fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  A number that is subnormal before rounding
	 * will signal an underflow if the result is inexact or if underflow
	 * traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -FP_NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) __fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (fpround(fe, fp) && fp->fp_mant[3] == SNG_EXP(1)) {
			fe->fe_cx |= FSR_UF;
			return (sign | SNG_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) __fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		__utrap_panic("fpu_ftos");
#endif
	if (fpround(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}
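/*
 * Packing example, for illustration only: 1.5 has sign 0, fp_exp 0 and
 * fraction .100..., so exp becomes 0 + SNG_EXP_BIAS = 127 and the
 * result is SNG_EXP(127) | 0x400000 == 0x3fc00000, the IEEE single
 * bit pattern for 1.5f.
 */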
/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
uint32_t
__fpu_ftod(struct fpemu *fe, struct fpn *fp, uint32_t *res)
{
	uint32_t sign = fp->fp_sign << 31;
	int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) __fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) __fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (fpround(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			fe->fe_cx |= FSR_UF;
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	(void) __fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (fpround(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN - 1) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}
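/*
 * Packing example, for illustration only: for 1.5, exp becomes
 * 0 + DBL_EXP_BIAS = 1023, so the returned high word is
 * DBL_EXP(1023) | 0x80000 == 0x3ff80000 and res[1] is 0, together the
 * IEEE double bit pattern 0x3ff8000000000000 for 1.5.
 */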
/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
uint32_t
__fpu_ftoq(struct fpemu *fe, struct fpn *fp, uint32_t *res)
{
	uint32_t sign = fp->fp_sign << 31;
	int exp;

#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) __fpu_shr(fp, 2);	/* since we are not rounding */
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) __fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (fpround(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			fe->fe_cx |= FSR_UF;
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (fpround(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN - 1) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
void
__fpu_implode(struct fpemu *fe, struct fpn *fp, int type, uint32_t *space)
{

	switch (type) {
	case FTYPE_LNG:
		space[0] = __fpu_ftox(fe, fp, space);
		break;

	case FTYPE_INT:
		space[0] = __fpu_ftoi(fe, fp);
		break;

	case FTYPE_SNG:
		space[0] = __fpu_ftos(fe, fp);
		break;

	case FTYPE_DBL:
		space[0] = __fpu_ftod(fe, fp, space);
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = __fpu_ftoq(fe, fp, space);
		break;

	default:
#ifdef _KERNEL_MODE
		panic("fpu_implode");
#else
		debugger("fpu_implode");
#endif
	}
	DPRINTF(FPE_REG, ("fpu_implode: %x %x %x %x\n",
	    space[0], space[1], space[2], space[3]));
}
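/*
 * Usage sketch, for illustration only (the FTYPE_* codes and the
 * companion __fpu_explode() are assumed from fpu_emu.h/fpu_extern.h):
 * the emulator unpacks the operands into struct fpn, performs the
 * arithmetic in the internal format, then repacks the result, e.g.
 *
 *	uint32_t space[4];
 *	__fpu_implode(fe, fp, FTYPE_DBL, space);
 *
 * after which space[0] and space[1] hold the high and low words of the
 * IEEE double.
 */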