/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   The value of an sreal is
	x = sig * 2 ^ exp
   where
	sig = significand
	  (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
	exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and wider)
   machines, otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the bits of a HOST_WIDE_INT are used for the significand
   (in normalized sreals) so that we do not have problems with overflow,
   for example when c->sig = a->sig * b->sig.  So the precision is 32 bits
   on both 64-bit and 32-bit machines.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
	 SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
	-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small, it meets the following
   conditions:
	sig == 0 && exp == -SREAL_MAX_EXP  */
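
/* For example, assuming a 64-bit host where sreal.h defines
   SREAL_PART_BITS == 32 (so SREAL_MIN_SIG == 2^31 and
   SREAL_MAX_SIG == 2^32 - 1), the integer 5 passed to sreal_init as
   sig == 5, exp == 0 is normalized to

	sig == 5 << 29 == 2684354560,  exp == -29

   since 2684354560 * 2^-29 == 5 and 2^31 <= 2684354560 <= 2^32 - 1.  */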

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "sreal.h"

static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
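
/* An illustrative sketch of how these entry points compose: the hypothetical
   helper below computes (3 * 10) / 4 with sreals and reads the integer part
   back out.  It relies only on the prototypes from sreal.h.  */

static inline HOST_WIDE_INT
sreal_example_usage (void)
{
  sreal a, b, ab, four, quot;

  sreal_init (&a, 3, 0);          /* a = 3 * 2^0.  */
  sreal_init (&b, 10, 0);         /* b = 10 * 2^0.  */
  sreal_mul (&ab, &a, &b);        /* ab = 30.  */
  sreal_init (&four, 4, 0);
  sreal_div (&quot, &ab, &four);  /* quot = 7.5.  */
  return sreal_to_int (&quot);    /* 7; the fractional part is truncated.  */
}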

/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
	   HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
	   x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}

/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}

/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */
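
/* For example, in the one-part representation (ignoring the normalization
   range for readability), shift_right (x, 1) with sig == 7 first adds the
   rounding term 1 << 0 and then shifts: sig becomes (7 + 1) >> 1 == 4 while
   exp grows by 1, so the value 7 * 2^e is rounded up to 4 * 2^(e+1).  */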

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* The exponent should never become that large, because shift_right is used
     only by sreal_add and sreal_sub and thus the number cannot be shifted out
     of the exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_hi++;
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	}
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}

/* Normalize *X.  */
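
/* Normalization shifts the significand left or right until it lies in
   [SREAL_MIN_SIG, SREAL_MAX_SIG], adjusting the exponent accordingly and
   rounding on right shifts.  Zero and underflowed values become
   sig == 0, exp == -SREAL_MAX_EXP; values that would overflow are clamped
   to exp == SREAL_MAX_EXP with a maximal significand.  */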

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      if (x->sig_hi == 0)
	{
	  /* Move the lower part of the significand to the higher part.  */
	  x->sig_hi = x->sig_lo;
	  x->sig_lo = 0;
	  x->exp -= SREAL_PART_BITS;
	}
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
	{
	  x->sig_hi <<= 1;
	  x->exp--;
	  shift++;
	}
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig_hi = 0;
	  x->sig_lo = 0;
	}
      else if (shift)
	{
	  mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
	  x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
	  x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
	}
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
	{
	  tmp >>= 1;
	  shift++;
	}
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
		    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	  x->sig_hi++;
	  if (x->sig_hi > SREAL_MAX_SIG)
	    {
	      /* x->sig_hi was SREAL_MAX_SIG before the increment,
		 so now the last bit is zero.  */
	      x->sig_hi >>= 1;
	      x->sig_lo >>= 1;
	      x->exp++;
	    }
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig_hi = SREAL_MAX_SIG;
	  x->sig_lo = SREAL_MAX_SIG;
	}
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      do
	{
	  x->sig <<= 1;
	  x->exp--;
	}
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig = 0;
	}
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      do
	{
	  last_bit = x->sig & 1;
	  x->sig >>= 1;
	  x->exp++;
	}
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
	{
	  x->sig >>= 1;
	  x->exp++;
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig = SREAL_MAX_SIG;
	}
    }
#endif
}

/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
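  /* SIG now sits in the high part of the significand, which scales the value
     by 2^SREAL_PART_BITS, so compensate for that in the exponent below.  */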
  r->exp = exp - 16;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}

/* Return the integer value of *R.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}

/* Compare *A and *B.  Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}

/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
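  /* If B is more than SREAL_BITS binary orders of magnitude smaller than A,
     it cannot change A's significand, so the sum is simply A.  */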
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}

/* *R = *A - *B.  Return R.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}

/* *R = *A * *B.  Return R.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
	{
	  sreal *swap;
	  swap = a;
	  a = b;
	  b = swap;
	}

      r->exp = a->exp + b->exp + SREAL_PART_BITS;
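
      /* Multiply the two 32-bit significands via their 16-bit halves
	 (schoolbook partial products).  The low SREAL_PART_BITS bits of the
	 64-bit product are dropped below, which the SREAL_PART_BITS term
	 added to the exponent above compensates for.  */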
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}

/* *R = *A / *B.  Return R.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since dividing by the whole number is pretty ugly to write,
	 we divide by the first 3/4 of its bits.  */
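      /* That is, the divisor is the top 3 * SREAL_PART_BITS / 2 bits of B's
	 significand (rounded), and the quotient is accumulated by schoolbook
	 long division in steps of SREAL_PART_BITS / 2 bits.  The exponent
	 adjustment below accounts for the scale of the truncated divisor and
	 for the quotient being built in sig_hi.  */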

      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
	      + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
	tmp2++;

      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
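  /* Pre-shifting the dividend's significand left by SREAL_PART_BITS preserves
     the quotient's precision; the exponent below compensates for it.  */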
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}