author | Akira Kakuto <kakuto@fuk.kindai.ac.jp> | 2020-07-12 09:25:07 +0000
---|---|---
committer | Akira Kakuto <kakuto@fuk.kindai.ac.jp> | 2020-07-12 09:25:07 +0000
commit | 66a1282e4e7e96b71e2cfb45acc282a2040a7acb (patch) |
tree | 39d6b05033785ad995eefec457d7e6a332b7bb78 /Build/source/libs/mpfr/mpfr-src/src/add1sp.c |
parent | cffbc490ae11d44a67e8890a76086448ff67e76b (diff) |
mpfr-4.1.0
git-svn-id: svn://tug.org/texlive/trunk@55817 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Build/source/libs/mpfr/mpfr-src/src/add1sp.c')
-rw-r--r-- | Build/source/libs/mpfr/mpfr-src/src/add1sp.c | 548
1 file changed, 338 insertions, 210 deletions
diff --git a/Build/source/libs/mpfr/mpfr-src/src/add1sp.c b/Build/source/libs/mpfr/mpfr-src/src/add1sp.c
index 64f7f03a188..ad13e3ecb6b 100644
--- a/Build/source/libs/mpfr/mpfr-src/src/add1sp.c
+++ b/Build/source/libs/mpfr/mpfr-src/src/add1sp.c
@@ -1,7 +1,7 @@
 /* mpfr_add1sp -- internal function to perform a "real" addition
    All the op must have the same precision

-Copyright 2004-2019 Free Software Foundation, Inc.
+Copyright 2004-2020 Free Software Foundation, Inc.
 Contributed by the AriC and Caramba projects, INRIA.

 This file is part of the GNU MPFR Library.
@@ -24,8 +24,21 @@ https://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
 #define MPFR_NEED_LONGLONG_H
 #include "mpfr-impl.h"

-/* Check if we have to check the result of mpfr_add1sp with mpfr_add1 */
 #if MPFR_WANT_ASSERT >= 2
+/* Check the result of mpfr_add1sp with mpfr_add1.
+
+   Note: mpfr_add1sp itself has two algorithms: one always valid and one
+   faster for small precisions (up to 3 limbs). The latter one is disabled
+   if MPFR_GENERIC_ABI is defined. When MPFR_WANT_ASSERT >= 2, it could be
+   interesting to compare the results of these different algorithms. For
+   the time being, this is currently done by running the same code on the
+   same data with and without MPFR_GENERIC_ABI defined, where we have the
+   following comparisons in small precisions:
+     mpfr_add1sp slow <-> mpfr_add1 when MPFR_GENERIC_ABI is defined;
+     mpfr_add1sp fast <-> mpfr_add1 when MPFR_GENERIC_ABI is not defined.
+   By transitivity, the absence of failures implies that the 3 results are
+   the same.
+*/
 int mpfr_add1sp_ref (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t);
 int mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
@@ -68,6 +81,10 @@ int mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       inexact = mpfr_add1sp_ref (a, b, c, rnd_mode);
       flags = __gmpfr_flags;

+      /* Convert the ternary values to (-1,0,1). */
+      inexact2 = VSIGN (inexact2);
+      inexact = VSIGN (inexact);
+
       if (! mpfr_equal_p (tmpa, a) || inexact != inexact2 || flags != flags2)
         {
           fprintf (stderr, "add1 & add1sp return different values for %s\n"
@@ -83,9 +100,10 @@ int mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
           mpfr_fdump (stderr, tmpa);
           fprintf (stderr, "add1sp: ");
           mpfr_fdump (stderr, a);
-          fprintf (stderr, "Inexact sp = %d | Inexact = %d\n"
-                   "Flags sp = %u | Flags = %u\n",
-                   inexact, inexact2, flags, flags2);
+          fprintf (stderr, "add1  : ternary = %2d, flags =", inexact2);
+          flags_fout (stderr, flags2);
+          fprintf (stderr, "add1sp: ternary = %2d, flags =", inexact);
+          flags_fout (stderr, flags);
           MPFR_ASSERTN (0);
         }
       mpfr_clears (tmpa, tmpb, tmpc, (mpfr_ptr) 0);
@@ -96,6 +114,18 @@ int mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)

 #if !defined(MPFR_GENERIC_ABI)

+#if defined(MPFR_WANT_PROVEN_CODE) && GMP_NUMB_BITS == 64 && \
+  UINT_MAX == 0xffffffff && MPFR_PREC_BITS == 64 && \
+  _MPFR_PREC_FORMAT == 3 && _MPFR_EXP_FORMAT == _MPFR_PREC_FORMAT
+
+/* The code assumes that mp_limb_t has 64 bits exactly, unsigned int
+   has 32 bits exactly, mpfr_prec_t and mpfr_exp_t are of type long,
+   which has 64 bits exactly. */
+
+#include "add1sp1_extracted.c"
+
+#else
+
 /* same as mpfr_add1sp, but for p < GMP_NUMB_BITS */
 static int
 mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
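Editorial note: the new guard ties the extracted, formally proven `add1sp1_extracted.c` to one specific ABI. As a rough standalone illustration (ours, not MPFR code), the same layout assumptions could be pinned down with C11 static assertions; `limb_t` is a hypothetical stand-in for `mp_limb_t`:

```c
#include <limits.h>
#include <stdint.h>

/* hypothetical stand-in for mp_limb_t under the ABI described above */
typedef uint64_t limb_t;

_Static_assert (sizeof (limb_t) * CHAR_BIT == 64,
                "limbs must have 64 bits exactly");
_Static_assert (UINT_MAX == 0xffffffff,
                "unsigned int must have 32 value bits exactly");
_Static_assert (sizeof (long) * CHAR_BIT == 64,
                "mpfr_prec_t/mpfr_exp_t (long) must have 64 bits exactly");

int main (void) { return 0; }
```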
@@ -128,9 +158,16 @@ mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
       ap[0] = a0 ^ rb;
       sb = 0; /* since b + c fits on p+1 bits, the sticky bit is zero */
     }
-  else if (bx > cx)
+  else
     {
-    BGreater1:
+      if (bx < cx) /* swap b and c */
+        {
+          mpfr_exp_t tx;
+          mp_limb_t *tp;
+          tx = bx; bx = cx; cx = tx;
+          tp = bp; bp = cp; cp = tp;
+        }
+      MPFR_ASSERTD (bx > cx);
       d = (mpfr_uexp_t) bx - cx;
       mask = MPFR_LIMB_MASK(sh);
       /* TODO: Should the case d < sh be removed, i.e. seen as a particular
@@ -143,7 +180,7 @@ mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           a0 = bp[0] + (cp[0] >> d);
           if (a0 < bp[0]) /* carry */
             {
-              MPFR_ASSERTD ((a0 & 1) == 0);
+              MPFR_ASSERTD ((a0 & MPFR_LIMB_ONE) == 0);
               a0 = MPFR_LIMB_HIGHBIT | (a0 >> 1);
               bx ++;
             }
@@ -157,7 +194,7 @@ mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           a0 = bp[0] + (cp[0] >> d);
           if (a0 < bp[0]) /* carry */
             {
-              sb |= a0 & 1;
+              sb |= a0 & MPFR_LIMB_ONE;
               a0 = MPFR_LIMB_HIGHBIT | (a0 >> 1);
               bx ++;
             }
@@ -172,14 +209,6 @@ mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           sb = 1; /* since c <> 0 */
         }
     }
-  else /* bx < cx: swap b and c */
-    {
-      mpfr_exp_t tx;
-      mp_limb_t *tp;
-      tx = bx; bx = cx; cx = tx;
-      tp = bp; bp = cp; cp = tp;
-      goto BGreater1;
-    }

   /* Note: we could keep the output significand in a0 for the rounding,
      and only store it in ap[0] at the very end, but this seems slower
@@ -223,6 +252,8 @@ mpfr_add1sp1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
     }
 }

+#endif /* MPFR_WANT_PROVEN_CODE */
+
 /* same as mpfr_add1sp, but for p = GMP_NUMB_BITS */
 static int
 mpfr_add1sp1n (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
@@ -249,9 +280,16 @@ mpfr_add1sp1n (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       bx ++;
       sb = 0; /* since b + c fits on p+1 bits, the sticky bit is zero */
     }
-  else if (bx > cx)
+  else
     {
-    BGreater1:
+      if (bx < cx) /* swap b and c */
+        {
+          mpfr_exp_t tx;
+          mp_limb_t *tp;
+          tx = bx; bx = cx; cx = tx;
+          tp = bp; bp = cp; cp = tp;
+        }
+      MPFR_ASSERTD (bx > cx);
       d = (mpfr_uexp_t) bx - cx;
       if (d < GMP_NUMB_BITS) /* 1 <= d < GMP_NUMB_BITS */
         {
@@ -260,7 +298,7 @@ mpfr_add1sp1n (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
           if (a0 < bp[0]) /* carry */
             {
               ap[0] = MPFR_LIMB_HIGHBIT | (a0 >> 1);
-              rb = a0 & 1;
+              rb = a0 & MPFR_LIMB_ONE;
               bx ++;
             }
           else /* no carry */
@@ -277,14 +315,6 @@ mpfr_add1sp1n (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
           rb = d == GMP_NUMB_BITS;
         }
     }
-  else /* bx < cx: swap b and c */
-    {
-      mpfr_exp_t tx;
-      mp_limb_t *tp;
-      tx = bx; bx = cx; cx = tx;
-      tp = bp; bp = cp; cp = tp;
-      goto BGreater1;
-    }

   /* Note: we could keep the output significand in a0 for the rounding,
      and only store it in ap[0] at the very end, but this seems slower
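Editorial note: the hunks above normalize the control flow (swap the operands so that `bx > cx` always holds, instead of a backward `goto BGreater1`) and then fold the bits of `c` shifted out by the alignment into a round bit `rb` and a sticky bit `sb`. A minimal standalone sketch of the `d < GMP_NUMB_BITS` step of `mpfr_add1sp1n`, with `uint64_t` standing in for `mp_limb_t` and arbitrary sample values:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint64_t b = UINT64_C (0x8000000000000001); /* significand of b, MSB set */
  uint64_t c = UINT64_C (0xC000000000000005); /* significand of c, MSB set */
  unsigned d = 3;               /* exponent difference, 0 < d < 64 */
  int64_t bx = 10;              /* exponent of b (and of the result) */

  uint64_t sb = c << (64 - d);  /* bits of c shifted out: sticky material */
  uint64_t a = b + (c >> d);    /* aligned addition */
  uint64_t rb;

  if (a < b) /* carry: the sum needs 65 bits, renormalize by one bit */
    {
      rb = a & 1;               /* bit pushed out by the extra shift */
      a = UINT64_C (0x8000000000000000) | (a >> 1);
      bx++;                     /* sb already holds all sticky bits */
    }
  else /* no carry: round bit is the MSB of the shifted-out bits */
    {
      rb = sb >> 63;
      sb <<= 1;
    }
  printf ("a=%016" PRIx64 " rb=%" PRIu64 " sticky=%d bx=%" PRId64 "\n",
          a, rb, sb != 0, bx);
  return 0;
}
```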
@@ -359,9 +389,16 @@ mpfr_add1sp2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
       ap[0] = a0 ^ rb;
       sb = 0; /* since b + c fits on p+1 bits, the sticky bit is zero */
     }
-  else if (bx > cx)
+  else
     {
-    BGreater2:
+      if (bx < cx) /* swap b and c */
+        {
+          mpfr_exp_t tx;
+          mp_limb_t *tp;
+          tx = bx; bx = cx; cx = tx;
+          tp = bp; bp = cp; cp = tp;
+        }
+      MPFR_ASSERTD (bx > cx);
       d = (mpfr_uexp_t) bx - cx;
       mask = MPFR_LIMB_MASK(sh);
       if (d < GMP_NUMB_BITS) /* 0 < d < GMP_NUMB_BITS */
@@ -372,7 +409,7 @@ mpfr_add1sp2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           if (a1 < bp[1]) /* carry in high word */
             {
             exponent_shift:
-              sb |= a0 & 1;
+              sb |= a0 & MPFR_LIMB_ONE;
               /* shift a by 1 */
               a0 = (a1 << (GMP_NUMB_BITS - 1)) | (a0 >> 1);
               ap[1] = MPFR_LIMB_HIGHBIT | (a1 >> 1);
@@ -405,14 +442,6 @@ mpfr_add1sp2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           sb = 1; /* since c <> 0 */
         }
     }
-  else /* bx < cx: swap b and c */
-    {
-      mpfr_exp_t tx;
-      mp_limb_t *tp;
-      tx = bx; bx = cx; cx = tx;
-      tp = bp; bp = cp; cp = tp;
-      goto BGreater2;
-    }

   /* now perform rounding */
   if (MPFR_UNLIKELY(bx > __gmpfr_emax))
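Editorial note: the next hunk adds `mpfr_add1sp2n`, a new kernel for p = 2*GMP_NUMB_BITS. Its `bx == cx` branch relies on both high limbs having their MSB set (normalized significands), so the two-limb addition always carries out and the result is renormalized by one bit. A minimal standalone illustration of that branch, with `uint64_t` standing in for `mp_limb_t`:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  /* Two normalized 2-limb significands (MSB of the high limb set). */
  uint64_t b1 = UINT64_C (0x8000000000000000), b0 = UINT64_C (0xFFFFFFFFFFFFFFFF);
  uint64_t c1 = UINT64_C (0x8000000000000000), c0 = UINT64_C (0x0000000000000001);

  uint64_t a0 = b0 + c0;
  uint64_t cy = a0 < b0;              /* carry out of the low limb */
  uint64_t s1 = b1 + c1;
  uint64_t a1 = s1 + cy;
  uint64_t carry_out = (s1 < b1) | (a1 < s1);

  /* Both high limbs have their MSB set, so carry_out is always 1 here
     and the 129-bit sum must be renormalized by one bit. */
  uint64_t rb = a0 & 1;               /* round bit after the 1-bit shift */
  a0 = (a1 << 63) | (a0 >> 1);
  a1 = UINT64_C (0x8000000000000000) | (a1 >> 1);

  printf ("carry=%" PRIu64 " a1=%016" PRIx64 " a0=%016" PRIx64
          " rb=%" PRIu64 "\n", carry_out, a1, a0, rb);
  return 0;
}
```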
@@ -451,6 +480,137 @@ mpfr_add1sp2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
     }
 }

+/* same as mpfr_add1sp, but for p = 2*GMP_NUMB_BITS */
+static int
+mpfr_add1sp2n (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  mpfr_exp_t bx = MPFR_GET_EXP (b);
+  mpfr_exp_t cx = MPFR_GET_EXP (c);
+  mp_limb_t *ap = MPFR_MANT(a);
+  mp_limb_t *bp = MPFR_MANT(b);
+  mp_limb_t *cp = MPFR_MANT(c);
+  mp_limb_t rb; /* round bit */
+  mp_limb_t sb; /* sticky bit */
+  mp_limb_t a1, a0;
+  mpfr_uexp_t d;
+
+  if (bx == cx)
+    {
+      /* since bp[1], cp[1] >= MPFR_LIMB_HIGHBIT, a carry always occurs */
+      a0 = bp[0] + cp[0];
+      a1 = bp[1] + cp[1] + (a0 < bp[0]);
+      rb = a0 & MPFR_LIMB_ONE;
+      sb = 0; /* since b + c fits on p+1 bits, the sticky bit is zero */
+      ap[0] = (a1 << (GMP_NUMB_BITS - 1)) | (a0 >> 1);
+      ap[1] = MPFR_LIMB_HIGHBIT | (a1 >> 1);
+      bx ++;
+    }
+  else
+    {
+      if (bx < cx) /* swap b and c */
+        {
+          mpfr_exp_t tx;
+          mp_limb_t *tp;
+          tx = bx; bx = cx; cx = tx;
+          tp = bp; bp = cp; cp = tp;
+        }
+      MPFR_ASSERTD (bx > cx);
+      d = (mpfr_uexp_t) bx - cx;
+      if (d >= 2 * GMP_NUMB_BITS)
+        {
+          if (d == 2 * GMP_NUMB_BITS)
+            {
+              rb = 1;
+              sb = (cp[0] != MPFR_LIMB_ZERO ||
+                    cp[1] > MPFR_LIMB_HIGHBIT);
+            }
+          else
+            {
+              rb = 0;
+              sb = 1;
+            }
+          ap[0] = bp[0];
+          ap[1] = bp[1];
+        }
+      else
+        {
+          /* First, compute (a0,a1) = b + (c >> d), and determine sb from
+             the bits shifted out such that (MSB, other bits) is regarded
+             as (rounding bit, sticky bit), assuming no carry. */
+          if (d < GMP_NUMB_BITS) /* 0 < d < GMP_NUMB_BITS */
+            {
+              sb = cp[0] << (GMP_NUMB_BITS - d);
+              a0 = bp[0] + ((cp[1] << (GMP_NUMB_BITS - d)) | (cp[0] >> d));
+              a1 = bp[1] + (cp[1] >> d) + (a0 < bp[0]);
+            }
+          else /* GMP_NUMB_BITS <= d < 2 * GMP_NUMB_BITS */
+            {
+              /* The most significant bit of sb should be the rounding bit,
+                 while the other bits represent the sticky bit:
+                 * if d = GMP_NUMB_BITS, we get cp[0];
+                 * if d > GMP_NUMB_BITS: we get the least d-GMP_NUMB_BITS bits
+                   of cp[1], and those from cp[0] as the LSB of sb. */
+              sb = (d == GMP_NUMB_BITS) ? cp[0]
+                : (cp[1] << (2*GMP_NUMB_BITS-d)) | (cp[0] != 0);
+              a0 = bp[0] + (cp[1] >> (d - GMP_NUMB_BITS));
+              a1 = bp[1] + (a0 < bp[0]);
+            }
+          if (a1 < bp[1]) /* carry in high word */
+            {
+              rb = a0 << (GMP_NUMB_BITS - 1);
+              /* and sb is the real sticky bit. */
+              /* Shift the result by 1 to the right. */
+              ap[0] = (a1 << (GMP_NUMB_BITS - 1)) | (a0 >> 1);
+              ap[1] = MPFR_LIMB_HIGHBIT | (a1 >> 1);
+              bx ++;
+            }
+          else
+            {
+              rb = MPFR_LIMB_MSB (sb);
+              sb <<= 1;
+              ap[0] = a0;
+              ap[1] = a1;
+            }
+        }
+    }
+
+  /* now perform rounding */
+  if (MPFR_UNLIKELY(bx > __gmpfr_emax))
+    return mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
+
+  MPFR_SET_EXP (a, bx);
+  if ((rb == 0 && sb == 0) || rnd_mode == MPFR_RNDF)
+    MPFR_RET(0);
+  else if (rnd_mode == MPFR_RNDN)
+    {
+      if (rb == 0 || (sb == 0 && (ap[0] & MPFR_LIMB_ONE) == 0))
+        goto truncate;
+      else
+        goto add_one_ulp;
+    }
+  else if (MPFR_IS_LIKE_RNDZ(rnd_mode, MPFR_IS_NEG(a)))
+    {
+    truncate:
+      MPFR_RET(-MPFR_SIGN(a));
+    }
+  else /* round away from zero */
+    {
+    add_one_ulp:
+      ap[0] += MPFR_LIMB_ONE;
+      ap[1] += (ap[0] == 0);
+      if (MPFR_UNLIKELY(ap[1] == 0))
+        {
+          ap[1] = MPFR_LIMB_HIGHBIT;
+          /* no need to have MPFR_LIKELY here, since we are in a rare branch */
+          if (bx + 1 <= __gmpfr_emax)
+            MPFR_SET_EXP (a, bx + 1);
+          else /* overflow */
+            return mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
+        }
+      MPFR_RET(MPFR_SIGN(a));
+    }
+}
+
 /* same as mpfr_add1sp, but for 2*GMP_NUMB_BITS < p < 3*GMP_NUMB_BITS */
 static int
 mpfr_add1sp3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
@@ -485,9 +645,16 @@ mpfr_add1sp3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
       ap[0] = a0 ^ rb;
       sb = 0; /* since b + c fits on p+1 bits, the sticky bit is zero */
     }
-  else if (bx > cx)
+  else
     {
-    BGreater2:
+      if (bx < cx) /* swap b and c */
+        {
+          mpfr_exp_t tx;
+          mp_limb_t *tp;
+          tx = bx; bx = cx; cx = tx;
+          tp = bp; bp = cp; cp = tp;
+        }
+      MPFR_ASSERTD (bx > cx);
       d = (mpfr_uexp_t) bx - cx;
       mask = MPFR_LIMB_MASK(sh);
       if (d < GMP_NUMB_BITS) /* 0 < d < GMP_NUMB_BITS */
@@ -502,7 +669,7 @@ mpfr_add1sp3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           if (a2 < bp[2] || (a2 == bp[2] && cy)) /* carry in high word */
             {
             exponent_shift:
-              sb |= a0 & 1;
+              sb |= a0 & MPFR_LIMB_ONE;
               /* shift a by 1 */
               a0 = (a1 << (GMP_NUMB_BITS - 1)) | (a0 >> 1);
               ap[1] = (a2 << (GMP_NUMB_BITS - 1)) | (a1 >> 1);
@@ -565,14 +732,6 @@ mpfr_add1sp3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,
           sb = 1; /* since c <> 0 */
         }
     }
-  else /* bx < cx: swap b and c */
-    {
-      mpfr_exp_t tx;
-      mp_limb_t *tp;
-      tx = bx; bx = cx; cx = tx;
-      tp = bp; bp = cp; cp = tp;
-      goto BGreater2;
-    }

   /* now perform rounding */
   if (MPFR_UNLIKELY(bx > __gmpfr_emax))
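Editorial note: the following hunk introduces a helper, `mpfr_addrsh`, that computes {ap, n} <- {bp, n} + ({cp, n} >> d), returns the carry, and collects the shifted-out bits in `*low` (MSB = would-be round bit, remaining bits = sticky material). A simplified standalone variant restricted to n = 2 limbs and 0 < d < 64, using `uint64_t` for `mp_limb_t` (the name `addrsh2` is ours):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* {ap, 2} <- {bp, 2} + ({cp, 2} >> d), 0 < d < 64; returns the carry
   and stores the bits shifted below ap[0] in *low. */
static uint64_t
addrsh2 (uint64_t ap[2], const uint64_t bp[2], const uint64_t cp[2],
         unsigned d, uint64_t *low)
{
  uint64_t cy, cy2, cs;

  *low = cp[0] << (64 - d);                 /* bits shifted below ap[0] */
  cs = (cp[1] << (64 - d)) | (cp[0] >> d);  /* low limb of c >> d */
  ap[0] = bp[0] + cs;
  cy = ap[0] < cs;                          /* carry out of the low limb */
  cs = cp[1] >> d;                          /* high limb of c >> d */
  ap[1] = bp[1] + cs;
  cy2 = ap[1] < cs;
  ap[1] += cy;
  return cy2 + (ap[1] < cy);                /* double-carry accumulation */
}

int main (void)
{
  uint64_t b[2] = { 0, UINT64_C (0x8000000000000000) };
  uint64_t c[2] = { 3, UINT64_C (0xC000000000000000) };
  uint64_t a[2], low;
  uint64_t carry = addrsh2 (a, b, c, 2, &low);
  printf ("carry=%" PRIu64 " a1=%016" PRIx64 " a0=%016" PRIx64
          " low=%016" PRIx64 "\n", carry, a[1], a[0], low);
  return 0;
}
```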
@@ -614,6 +773,82 @@ mpfr_add1sp3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode,

 #endif /* !defined(MPFR_GENERIC_ABI) */

+/* {ap, n} <- {bp, n} + {cp + q, n - q} >> r where d = q * GMP_NUMB_BITS + r.
+   Return the carry at ap[n+1] (0 or 1) and set *low so that:
+   * the most significant bit of *low would be that of ap[-1] if we would
+     compute one more limb of the (infinite) addition
+   * the GMP_NUMB_BITS-1 least significant bits of *low are zero iff all bits
+     of ap[-1], ap[-2], ... would be zero (except the most significant bit
+     of ap[-1]).
+   Assume 0 < d < GMP_NUMB_BITS*n. */
+static mp_limb_t
+mpfr_addrsh (mp_limb_t *ap, mp_limb_t *bp, mp_limb_t *cp, mp_size_t n,
+             mp_size_t d, mp_limb_t *low)
+{
+  mp_limb_t cy, cy2, c_shifted;
+  mp_size_t i;
+
+  if (d < GMP_NUMB_BITS)
+    {
+      /* {ap, n} <- {bp, n} + {cp, n} >> d */
+      MPFR_ASSERTD (d > 0);
+      /* thus 0 < GMP_NUMB_BITS - d < GMP_NUMB_BITS */
+      *low = cp[0] << (GMP_NUMB_BITS - d);
+      for (i = 0, cy = 0; i < n - 1; i++)
+        {
+          c_shifted = (cp[i+1] << (GMP_NUMB_BITS - d)) | (cp[i] >> d);
+          ap[i] = bp[i] + c_shifted;
+          cy2 = ap[i] < c_shifted;
+          ap[i] += cy;
+          cy = cy2 + (ap[i] < cy);
+        }
+      /* most significant limb is special */
+      c_shifted = cp[i] >> d;
+      ap[i] = bp[i] + c_shifted;
+      cy2 = ap[i] < c_shifted;
+      ap[i] += cy;
+      cy = cy2 + (ap[i] < cy);
+    }
+  else /* d >= GMP_NUMB_BITS */
+    {
+      mp_size_t q = d / GMP_NUMB_BITS;
+      mpfr_uexp_t r = d % GMP_NUMB_BITS;
+      if (r == 0)
+        {
+          MPFR_ASSERTD(q > 0);
+          *low = cp[q-1];
+          for (i = 0; i < q-1; i++)
+            *low |= !!cp[i];
+          cy = mpn_add_n (ap, bp, cp + q, n - q);
+          cy = mpn_add_1 (ap + n - q, bp + n - q, q, cy);
+        }
+      else /* 0 < r < GMP_NUMB_BITS */
+        {
+          *low = cp[q] << (GMP_NUMB_BITS - r);
+          for (i = 0; i < q; i++)
+            *low |= !!cp[i];
+          for (i = 0, cy = 0; i < n - q - 1; i++)
+            {
+              c_shifted = (cp[q+i+1] << (GMP_NUMB_BITS - r)) | (cp[q+i] >> r);
+              ap[i] = bp[i] + c_shifted;
+              cy2 = ap[i] < c_shifted;
+              ap[i] += cy;
+              cy = cy2 + (ap[i] < cy);
+            }
+          /* most significant limb of c is special */
+          MPFR_ASSERTD(i == n - q - 1);
+          c_shifted = cp[n-1] >> r;
+          ap[i] = bp[i] + c_shifted;
+          cy2 = ap[i] < c_shifted;
+          ap[i] += cy;
+          cy = cy2 + (ap[i] < cy);
+          /* upper limbs are copied */
+          cy = mpn_add_1 (ap + n - q, bp + n - q, q, cy);
+        }
+    }
+  return cy;
+}
+
 /* compute sign(b) * (|b| + |c|).
    Returns 0 iff result is exact,
    a negative value when the result is less than the exact value,
@@ -625,14 +860,11 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
   mpfr_prec_t p;
   unsigned int sh;
   mp_size_t n;
-  mp_limb_t *ap, *cp;
+  mp_limb_t *ap = MPFR_MANT(a);
   mpfr_exp_t bx;
-  mp_limb_t limb;
+  mp_limb_t limb, rb, sb;
   int inexact;
   int neg;
-  MPFR_TMP_DECL(marker);
-
-  MPFR_TMP_MARK(marker);

   MPFR_ASSERTD(MPFR_PREC(a) == MPFR_PREC(b) && MPFR_PREC(b) == MPFR_PREC(c));
   MPFR_ASSERTD(MPFR_IS_PURE_FP(b));
@@ -657,6 +889,9 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)

   if (2 * GMP_NUMB_BITS < p && p < 3 * GMP_NUMB_BITS)
     return mpfr_add1sp3 (a, b, c, rnd_mode, p);
+
+  if (p == 2 * GMP_NUMB_BITS)
+    return mpfr_add1sp2n (a, b, c, rnd_mode);
 #endif

   /* We need to get the sign before the possible exchange. */
@@ -684,14 +919,13 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       /* mpfr_print_mant_binary("C= ", MPFR_MANT(c), p); */
       /* mpfr_print_mant_binary("B= ", MPFR_MANT(b), p); */
       bx++;                                /* exp + 1 */
-      ap = MPFR_MANT(a);
       limb = mpn_add_n (ap, MPFR_MANT(b), MPFR_MANT(c), n);
       /* mpfr_print_mant_binary("A= ", ap, p); */
       MPFR_ASSERTD(limb != 0);             /* There must be a carry */
-      limb = ap[0];                        /* Get LSB (In fact, LSW) */
+      rb = ap[0] & (MPFR_LIMB_ONE << sh);  /* Get round bit (sb=0) */
       mpn_rshift (ap, ap, n, 1);           /* Shift mantissa A */
       ap[n-1] |= MPFR_LIMB_HIGHBIT;        /* Set MSB */
-      ap[0] &= ~MPFR_LIMB_MASK(sh);        /* Clear LSB bit */
+      ap[0] &= ~MPFR_LIMB_MASK(sh);        /* Clear round bit */

       /* Fast track for faithful rounding: since b and c have the same
@@ -700,7 +934,7 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       if (rnd_mode == MPFR_RNDF)
         { inexact = 0; goto set_exponent; }

-      if ((limb & (MPFR_LIMB_ONE << sh)) == 0) /* Check exact case */
+      if (rb == 0) /* Check exact case */
        { inexact = 0; goto set_exponent; }

       /* Zero: Truncate
@@ -734,16 +968,16 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       if (rnd_mode == MPFR_RNDN || MPFR_IS_LIKE_RNDZ (rnd_mode, neg))
         {
         copy_set_exponent:
-          ap = MPFR_MANT (a);
-          MPN_COPY (ap, MPFR_MANT(b), n);
+          if (a != b)
+            MPN_COPY (ap, MPFR_MANT(b), n);
           inexact = -1;
           goto set_exponent;
         }
       else
         {
         copy_add_one_ulp:
-          ap = MPFR_MANT(a);
-          MPN_COPY (ap, MPFR_MANT(b), n);
+          if (a != b)
+            MPN_COPY (ap, MPFR_MANT(b), n);
           goto add_one_ulp;
         }
     }
@@ -756,18 +990,9 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
       if (rnd_mode == MPFR_RNDN)
         {
           /* Check if C was a power of 2 */
-          cp = MPFR_MANT(c);
-          if (MPFR_UNLIKELY (cp[n-1] == MPFR_LIMB_HIGHBIT))
-            {
-              mp_size_t k = n-1;
-              do
-                k--;
-              while (k >= 0 && cp[k] == 0);
-              if (MPFR_UNLIKELY (k < 0))
-                /* Power of 2: Even rule */
-                if (((MPFR_MANT (b))[0] & (MPFR_LIMB_ONE << sh)) == 0)
-                  goto copy_set_exponent;
-            }
+          if (mpfr_powerof2_raw (c) &&
+              ((MPFR_MANT (b))[0] & (MPFR_LIMB_ONE << sh)) == 0)
+            goto copy_set_exponent;
           /* Not a Power of 2 */
           goto copy_add_one_ulp;
         }
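Editorial note: the hunk above replaces a hand-rolled loop with MPFR's internal `mpfr_powerof2_raw` for the even-rule shortcut when `c` is a power of 2 and `b`'s last significand bit is 0. A standalone equivalent of that raw-significand test (the helper name is ours; limbs are stored little-endian, so `mp[n-1]` is the most significant):

```c
#include <stdint.h>
#include <stdio.h>

/* c is a power of 2 iff its significand is 100...0: the top limb holds
   only the high bit and all lower limbs are zero. */
static int
is_pow2_raw (const uint64_t *mp, long n)
{
  if (mp[n - 1] != UINT64_C (0x8000000000000000))
    return 0;
  while (--n > 0)
    if (mp[n - 1] != 0)
      return 0;
  return 1;
}

int main (void)
{
  uint64_t one[2]  = { 0, UINT64_C (0x8000000000000000) };  /* 1.0 */
  uint64_t not1[2] = { 1, UINT64_C (0x8000000000000000) };
  printf ("%d %d\n", is_pow2_raw (one, 2), is_pow2_raw (not1, 2));
  return 0;
}
```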
@@ -779,162 +1004,67 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
     }
   else /* 0 < d < p */
     {
-      mp_limb_t mask;
-      mp_limb_t bcp, bcp1; /* Cp and C'p+1 */
+      mp_limb_t mask = ~MPFR_LIMB_MASK(sh);

       /* General case: 1 <= d < p */
-      cp = MPFR_TMP_LIMBS_ALLOC (n);
-
-      /* Shift c in temporary allocated place */
-      {
-        mpfr_uexp_t dm;
-        mp_size_t m;
-
-        dm = d % GMP_NUMB_BITS;
-        m = d / GMP_NUMB_BITS;
-        if (MPFR_UNLIKELY (dm == 0))
-          {
-            /* dm = 0 and m > 0: Just copy */
-            MPFR_ASSERTD (m != 0);
-            MPN_COPY(cp, MPFR_MANT(c)+m, n-m);
-            MPN_ZERO(cp+n-m, m);
-          }
-        else if (MPFR_LIKELY(m == 0))
-          {
-            /* dm >=1 and m == 0: just shift */
-            MPFR_ASSERTD(dm >= 1);
-            mpn_rshift(cp, MPFR_MANT(c), n, dm);
-          }
-        else
-          {
-            /* dm > 0 and m > 0: shift and zero */
-            mpn_rshift(cp, MPFR_MANT(c)+m, n-m, dm);
-            MPN_ZERO(cp+n-m, m);
-          }
-      }
-
-      /* mpfr_print_mant_binary("Before", MPFR_MANT(c), p);
-         mpfr_print_mant_binary("B=    ", MPFR_MANT(b), p);
-         mpfr_print_mant_binary("After ", cp, p); */
-
-      /* fast track for RNDF */
-      if (rnd_mode == MPFR_RNDF)
-        {
-          /* The bcp and bcp1 values will not matter for MPFR_RNDF, but
-             let's set them to 0 to avoid undefined behavior. */
-          bcp1 = bcp = 0;
-          goto clean;
-        }
+      limb = mpfr_addrsh (ap, MPFR_MANT(b), MPFR_MANT(c), n, d, &sb);
+      /* the most significant bit of sb contains what would be the most
+         significant bit of ap[-1], and the remaining bits of sb are 0
+         iff the remaining bits of ap[-1], ap[-2], ... are all zero */

-      /* Compute bcp=Cp and bcp1=C'p+1: bcp is the first neglected bit
-         (round bit), and bcp1 corresponds to the remaining bits (sticky bit).
-      */
       if (sh > 0)
         {
-          /* Try to compute them from C' rather than C */
-          bcp = (cp[0] & (MPFR_LIMB_ONE<<(sh-1)));
-          if (MPFR_LIKELY (cp[0] & MPFR_LIMB_MASK (sh - 1)))
-            bcp1 = 1;
-          else
-            {
-              /* We can't compute C'p+1 from C'. Compute it from C */
-              /* Start from bit x=p-d+sh in mantissa C
-                 (+sh since we have already looked sh bits in C'!) */
-              mpfr_prec_t x = p - d + sh - 1;
-              if (MPFR_LIKELY (x > p))
-                /* We are already looked at all the bits of c, so C'p+1 = 0*/
-                bcp1 = 0;
-              else
-                {
-                  mp_limb_t *tp = MPFR_MANT(c);
-                  mp_size_t kx = n - 1 - (x / GMP_NUMB_BITS);
-                  mpfr_prec_t sx = GMP_NUMB_BITS - 1 - (x % GMP_NUMB_BITS);
-                  /* printf ("(First) x=%lu Kx=%ld Sx=%lu\n",
-                     (unsigned long) x, (long) kx, (unsigned long) sx); */
-                  /* Looks at the last bits of limb kx (if sx=0 does nothing)*/
-                  if (tp[kx] & MPFR_LIMB_MASK(sx))
-                    bcp1 = 1;
-                  else
-                    {
-                      /*kx += (sx==0);*/
-                      /*If sx==0, tp[kx] hasn't been checked*/
-                      do
-                        kx--;
-                      while (kx >= 0 && tp[kx] == 0);
-                      bcp1 = (kx >= 0);
-                    }
-                }
-            }
+          /* The round bit and a part of the sticky bit are in ap[0]. */
+          rb = (ap[0] & (MPFR_LIMB_ONE << (sh - 1)));
+          sb |= ap[0] & MPFR_LIMB_MASK (sh - 1);
         }
-      else /* sh == 0 */
-        {
-          /* Compute Cp and C'p+1 from C with sh=0 */
-          mp_limb_t *tp = MPFR_MANT(c);
-          /* Start from bit x=p-d in mantissa C */
-          mpfr_prec_t x = p - d;
-          mp_size_t kx = n - 1 - (x / GMP_NUMB_BITS);
-          mpfr_prec_t sx = GMP_NUMB_BITS - 1 - (x % GMP_NUMB_BITS);
-          MPFR_ASSERTD (p >= d);
-          bcp = tp[kx] & (MPFR_LIMB_ONE << sx);
-          /* Looks at the last bits of limb kx (If sx=0, does nothing)*/
-          if (tp[kx] & MPFR_LIMB_MASK(sx))
-            bcp1 = 1;
-          else
-            {
-              do
-                kx--;
-              while (kx >= 0 && tp[kx] == 0);
-              bcp1 = (kx >= 0);
-            }
+      else
+        {
+          /* The round bit and possibly a part of the sticky bit are
+             in sb. */
+          rb = sb & MPFR_LIMB_HIGHBIT;
+          sb &= ~MPFR_LIMB_HIGHBIT;
         }
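Editorial note: the new round/sticky extraction above depends on `sh`, the number of unused low bits in the last limb (p = n*GMP_NUMB_BITS - sh). A standalone sketch of the two cases, with arbitrary sample values and `uint64_t` standing in for `mp_limb_t`:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
  unsigned sh = 3;  /* unused low bits in the last limb */
  uint64_t a0 = UINT64_C (0xA5);               /* low limb after the add */
  uint64_t sb = UINT64_C (0x4000000000000000); /* *low from the helper */
  uint64_t rb;

  if (sh > 0)
    { /* round bit and part of the sticky bit live inside a0 */
      rb = a0 & (UINT64_C (1) << (sh - 1));
      sb |= a0 & ((UINT64_C (1) << (sh - 1)) - 1);
      a0 &= ~((UINT64_C (1) << sh) - 1);       /* clear the sh low bits */
    }
  else
    { /* round bit is the MSB of sb; the rest is the sticky bit */
      rb = sb & UINT64_C (0x8000000000000000);
      sb &= ~UINT64_C (0x8000000000000000);
    }
  printf ("a0=%016" PRIx64 " rb=%d sb=%d\n", a0, rb != 0, sb != 0);
  return 0;
}
```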
-      /* printf("sh=%u Cp=%lu C'p+1=%lu\n", sh,
-         (unsigned long) bcp, (unsigned long) bcp1); */
-
-    clean:
-      /* Clean shifted C' */
-      mask = ~MPFR_LIMB_MASK(sh);
-      cp[0] &= mask;
-
-      /* Add the mantissa c from b in a */
-      ap = MPFR_MANT(a);
-      limb = mpn_add_n (ap, MPFR_MANT(b), cp, n);
-      /* mpfr_print_mant_binary("Add=  ", ap, p); */
-
-      /* Check for overflow */
-      if (MPFR_UNLIKELY (limb))
-        {
-          limb = ap[0] & (MPFR_LIMB_ONE<<sh); /* Get LSB */
-          mpn_rshift (ap, ap, n, 1);          /* Shift mantissa */
-          bx++;                               /* Fix exponent */
-          ap[n-1] |= MPFR_LIMB_HIGHBIT;       /* Set MSB */
-          ap[0] &= mask;                      /* Clear LSB bit */
-          bcp1 |= bcp;                        /* Recompute C'p+1 */
-          bcp = limb;                         /* Recompute Cp */
-          /* printf ("(Overflow) Cp=%lu C'p+1=%lu\n",
-             (unsigned long) bcp, (unsigned long) bcp1);
+
+      ap[0] &= mask;
+
+      /* Check for carry out */
+      if (MPFR_UNLIKELY (limb != 0))
+        {
+          limb = ap[0] & (MPFR_LIMB_ONE << sh); /* Get LSB (will be new rb) */
+          mpn_rshift (ap, ap, n, 1);            /* Shift significand */
+          bx++;                                 /* Increase exponent */
+          ap[n-1] |= MPFR_LIMB_HIGHBIT;         /* Set MSB */
+          ap[0] &= mask;                        /* Clear LSB */
+          sb |= rb;                             /* Update sb */
+          rb = limb;                            /* New rb */
+          /* printf ("(Overflow) rb=%lu sb=%lu\n",
+             (unsigned long) rb, (unsigned long) sb);
             mpfr_print_mant_binary ("Add=  ", ap, p); */
         }

       /* Round:
          Zero: Truncate but could be exact.
-         Away: Add 1 if Cp or C'p+1 !=0
-         Nearest: Truncate but could be exact if Cp==0
-                  Add 1 if C'p+1 !=0,
+         Away: Add 1 if rb or sb !=0
+         Nearest: Truncate but could be exact if sb==0
+                  Add 1 if rb !=0,
                   Even rule else */
       if (MPFR_LIKELY(rnd_mode == MPFR_RNDF))
         { inexact = 0; goto set_exponent; }
       else if (rnd_mode == MPFR_RNDN)
         {
-          inexact = - (bcp1 != 0);
-          if (bcp == 0)
+          inexact = - (sb != 0);
+          if (rb == 0)
            goto set_exponent;
-          else if (MPFR_UNLIKELY(bcp1==0) && (ap[0]&(MPFR_LIMB_ONE<<sh))==0)
+          else if (MPFR_UNLIKELY (sb == 0) &&
+                   (ap[0] & (MPFR_LIMB_ONE << sh)) == 0)
            { inexact = -1; goto set_exponent; }
          else
            goto add_one_ulp;
        }
       MPFR_UPDATE_RND_MODE(rnd_mode, neg);
-      inexact = -(bcp != 0 || bcp1 != 0);
+      inexact = - (rb != 0 || sb != 0);
       if (rnd_mode == MPFR_RNDZ || inexact == 0)
         goto set_exponent;
       else
@@ -945,7 +1075,7 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
  add_one_ulp:
   /* add one unit in last place to a */
   /* printf("AddOneUlp\n"); */
-  if (MPFR_UNLIKELY( mpn_add_1(ap, ap, n, MPFR_LIMB_ONE<<sh) ))
+  if (MPFR_UNLIKELY (mpn_add_1 (ap, ap, n, MPFR_LIMB_ONE << sh)))
     {
       /* Case 100000x0 = 0x1111x1 + 1*/
       /* printf("Pow of 2\n"); */
@@ -958,11 +1088,9 @@ mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
   if (MPFR_UNLIKELY(bx > __gmpfr_emax)) /* Check for overflow */
     {
       /* printf("Overflow\n"); */
-      MPFR_TMP_FREE(marker);
       return mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
     }
   MPFR_SET_EXP (a, bx);
-  MPFR_TMP_FREE(marker);
   MPFR_RET (inexact * MPFR_INT_SIGN (a));
 }
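Editorial note: throughout the patch, rounding to nearest reduces to the same decision on the round bit `rb` and sticky bit `sb`, with the even rule breaking ties. A compact standalone restatement of that decision (names are ours, not MPFR's; `ulp_bit` is the lowest significand bit, i.e. `MPFR_LIMB_ONE << sh`):

```c
#include <stdint.h>
#include <stdio.h>

typedef enum { TRUNCATE, ADD_ONE_ULP } action_t;

/* MPFR_RNDN: round down when below the halfway point, or when exactly
   halfway and the significand is already even (even rule). */
static action_t
rndn_action (uint64_t rb, uint64_t sb, uint64_t a0, uint64_t ulp_bit)
{
  if (rb == 0 || (sb == 0 && (a0 & ulp_bit) == 0))
    return TRUNCATE;
  return ADD_ONE_ULP;
}

int main (void)
{
  printf ("%d\n", rndn_action (1, 0, 0x10, 0x10)); /* tie, odd  -> add ulp  */
  printf ("%d\n", rndn_action (1, 0, 0x00, 0x10)); /* tie, even -> truncate */
  printf ("%d\n", rndn_action (0, 1, 0x00, 0x10)); /* below     -> truncate */
  return 0;
}
```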