/* Extracted from official-gcc.git: libgcc/libgcc2.c
   (blob ef46153731fd619f9869ff9d21e1df43858349a7).  */
1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989-2024 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #include "tconfig.h"
27 #include "tsystem.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "libgcc_tm.h"
32 #ifdef HAVE_GAS_HIDDEN
33 #define ATTRIBUTE_HIDDEN __attribute__ ((__visibility__ ("hidden")))
34 #else
35 #define ATTRIBUTE_HIDDEN
36 #endif
38 /* Work out the largest "word" size that we can deal with on this target. */
39 #if MIN_UNITS_PER_WORD > 4
40 # define LIBGCC2_MAX_UNITS_PER_WORD 8
41 #elif (MIN_UNITS_PER_WORD > 2 \
42 || (MIN_UNITS_PER_WORD > 1 && __SIZEOF_LONG_LONG__ > 4))
43 # define LIBGCC2_MAX_UNITS_PER_WORD 4
44 #else
45 # define LIBGCC2_MAX_UNITS_PER_WORD MIN_UNITS_PER_WORD
46 #endif
48 /* Work out what word size we are using for this compilation.
49 The value can be set on the command line. */
50 #ifndef LIBGCC2_UNITS_PER_WORD
51 #define LIBGCC2_UNITS_PER_WORD LIBGCC2_MAX_UNITS_PER_WORD
52 #endif
54 #if LIBGCC2_UNITS_PER_WORD <= LIBGCC2_MAX_UNITS_PER_WORD
56 #include "libgcc2.h"
58 #ifdef DECLARE_LIBRARY_RENAMES
59 DECLARE_LIBRARY_RENAMES
60 #endif
#if defined (L_negdi2)
/* Return -U, computed limb-wise: negate both halves and propagate the
   borrow from the low half when it is nonzero.  */
DWtype
__negdi2 (DWtype u)
{
  const DWunion uu = {.ll = u};
  const DWunion w = { {.low = -uu.s.low,
		       .high = -uu.s.high - ((UWtype) -uu.s.low > 0) } };

  return w.ll;
}
#endif
#ifdef L_addvsi3
/* Trapping addition: return A + B, calling abort () on signed overflow.  */
Wtype
__addvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
/* SImode compatibility entry point for targets where Wtype is wider.  */
SItype
__addvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_addvdi3
/* Double-word trapping addition: return A + B, abort () on overflow.  */
DWtype
__addvDI3 (DWtype a, DWtype b)
{
  DWtype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#endif
#ifdef L_subvsi3
/* Trapping subtraction: return A - B, calling abort () on signed overflow.  */
Wtype
__subvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
/* SImode compatibility entry point for targets where Wtype is wider.  */
SItype
__subvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_subvdi3
/* Double-word trapping subtraction: return A - B, abort () on overflow.  */
DWtype
__subvDI3 (DWtype a, DWtype b)
{
  DWtype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#endif
#ifdef L_mulvsi3
/* Trapping multiplication: return A * B, calling abort () on signed
   overflow.  */
Wtype
__mulvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_mul_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
/* SImode compatibility entry point for targets where Wtype is wider.  */
SItype
__mulvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_mul_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_negvsi2
/* Trapping negation: return -A, calling abort () on overflow (A == min).  */
Wtype
__negvSI2 (Wtype a)
{
  Wtype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
/* SImode compatibility entry point for targets where Wtype is wider.  */
SItype
__negvsi2 (SItype a)
{
  SItype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_negvdi2
/* Double-word trapping negation: return -A, abort () on overflow.  */
DWtype
__negvDI2 (DWtype a)
{
  DWtype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#endif
#ifdef L_absvsi2
/* Trapping absolute value: return |A|, calling abort () on overflow
   (A == min).  Branch-free: V is all-ones iff A < 0, so V ^ (A + V)
   is the two's-complement negation of A in that case.  */
Wtype
__absvSI2 (Wtype a)
{
  const Wtype v = 0 - (a < 0);
  Wtype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
/* SImode compatibility entry point for targets where Wtype is wider.  */
SItype
__absvsi2 (SItype a)
{
  const SItype v = 0 - (a < 0);
  SItype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
#ifdef L_absvdi2
/* Double-word trapping absolute value: return |A|, abort () on overflow.  */
DWtype
__absvDI2 (DWtype a)
{
  const DWtype v = 0 - (a < 0);
  DWtype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#endif
#ifdef L_mulvdi3
/* Double-word trapping multiplication: return U * V, calling abort ()
   on signed overflow.  */
DWtype
__mulvDI3 (DWtype u, DWtype v)
{
  /* The unchecked multiplication needs 3 Wtype x Wtype multiplications,
     but the checked multiplication needs only two.  */
  const DWunion uu = {.ll = u};
  const DWunion vv = {.ll = v};

  if (__builtin_expect (uu.s.high == uu.s.low >> (W_TYPE_SIZE - 1), 1))
    {
      /* u fits in a single Wtype.  */
      if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
	{
	  /* v fits in a single Wtype as well.  */
	  /* A single multiplication.  No overflow risk.  */
	  return (DWtype) uu.s.low * (DWtype) vv.s.low;
	}
      else
	{
	  /* Two multiplications.  */
	  DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
			* (UDWtype) (UWtype) vv.s.low};
	  DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.low
			* (UDWtype) (UWtype) vv.s.high};

	  if (vv.s.high < 0)
	    w1.s.high -= uu.s.low;
	  if (uu.s.low < 0)
	    w1.ll -= vv.ll;
	  w1.ll += (UWtype) w0.s.high;
	  if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
	    {
	      w0.s.high = w1.s.low;
	      return w0.ll;
	    }
	}
    }
  else
    {
      if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
	{
	  /* v fits into a single Wtype.  */
	  /* Two multiplications.  */
	  DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
			* (UDWtype) (UWtype) vv.s.low};
	  DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.high
			* (UDWtype) (UWtype) vv.s.low};

	  if (uu.s.high < 0)
	    w1.s.high -= vv.s.low;
	  if (vv.s.low < 0)
	    w1.ll -= uu.ll;
	  w1.ll += (UWtype) w0.s.high;
	  if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
	    {
	      w0.s.high = w1.s.low;
	      return w0.ll;
	    }
	}
      else
	{
	  /* A few sign checks and a single multiplication.  */
	  if (uu.s.high >= 0)
	    {
	      if (vv.s.high >= 0)
		{
		  if (uu.s.high == 0 && vv.s.high == 0)
		    {
		      const DWtype w = (UDWtype) (UWtype) uu.s.low
			* (UDWtype) (UWtype) vv.s.low;
		      if (__builtin_expect (w >= 0, 1))
			return w;
		    }
		}
	      else
		{
		  if (uu.s.high == 0 && vv.s.high == (Wtype) -1)
		    {
		      DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
				    * (UDWtype) (UWtype) vv.s.low};

		      ww.s.high -= uu.s.low;
		      if (__builtin_expect (ww.s.high < 0, 1))
			return ww.ll;
		    }
		}
	    }
	  else
	    {
	      if (vv.s.high >= 0)
		{
		  if (uu.s.high == (Wtype) -1 && vv.s.high == 0)
		    {
		      DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
				    * (UDWtype) (UWtype) vv.s.low};

		      ww.s.high -= vv.s.low;
		      if (__builtin_expect (ww.s.high < 0, 1))
			return ww.ll;
		    }
		}
	      else
		{
		  if ((uu.s.high & vv.s.high) == (Wtype) -1
		      && (uu.s.low | vv.s.low) != 0)
		    {
		      DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
				    * (UDWtype) (UWtype) vv.s.low};

		      ww.s.high -= uu.s.low;
		      ww.s.high -= vv.s.low;
		      if (__builtin_expect (ww.s.high >= 0, 1))
			return ww.ll;
		    }
		}
	    }
	}
    }

  /* Overflow.  */
  abort ();
}
#endif
/* Unless shift functions are defined with full ANSI prototypes,
   parameter b will be promoted to int if shift_count_type is smaller than an int.  */
#ifdef L_lshrdi3
/* Logical (zero-filling) right shift of the double-word U by B bits.  */
DWtype
__lshrdi3 (DWtype u, shift_count_type b)
{
  if (b == 0)
    return u;

  const DWunion uu = {.ll = u};
  const shift_count_type bm = W_TYPE_SIZE - b;
  DWunion w;

  if (bm <= 0)
    {
      /* Shift count >= one word: the result is the high word shifted
	 into the low position.  */
      w.s.high = 0;
      w.s.low = (UWtype) uu.s.high >> -bm;
    }
  else
    {
      /* Bits shifted out of the high word carry into the low word.  */
      const UWtype carries = (UWtype) uu.s.high << bm;

      w.s.high = (UWtype) uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
    }

  return w.ll;
}
#endif
#ifdef L_ashldi3
/* Arithmetic/logical left shift of the double-word U by B bits.  */
DWtype
__ashldi3 (DWtype u, shift_count_type b)
{
  if (b == 0)
    return u;

  const DWunion uu = {.ll = u};
  const shift_count_type bm = W_TYPE_SIZE - b;
  DWunion w;

  if (bm <= 0)
    {
      /* Shift count >= one word: the low word shifts into the high
	 position.  */
      w.s.low = 0;
      w.s.high = (UWtype) uu.s.low << -bm;
    }
  else
    {
      /* Bits shifted out of the low word carry into the high word.  */
      const UWtype carries = (UWtype) uu.s.low >> bm;

      w.s.low = (UWtype) uu.s.low << b;
      w.s.high = ((UWtype) uu.s.high << b) | carries;
    }

  return w.ll;
}
#endif
#ifdef L_ashrdi3
/* Arithmetic (sign-extending) right shift of the double-word U by B bits.  */
DWtype
__ashrdi3 (DWtype u, shift_count_type b)
{
  if (b == 0)
    return u;

  const DWunion uu = {.ll = u};
  const shift_count_type bm = W_TYPE_SIZE - b;
  DWunion w;

  if (bm <= 0)
    {
      /* w.s.high = 1..1 or 0..0 */
      w.s.high = uu.s.high >> (W_TYPE_SIZE - 1);
      w.s.low = uu.s.high >> -bm;
    }
  else
    {
      /* Bits shifted out of the high word carry into the low word.  */
      const UWtype carries = (UWtype) uu.s.high << bm;

      w.s.high = uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
    }

  return w.ll;
}
#endif
#ifdef L_bswapsi2
/* Reverse the byte order of the 32-bit value U.  */
SItype
__bswapsi2 (SItype u)
{
  return ((((u) & 0xff000000u) >> 24)
	  | (((u) & 0x00ff0000u) >> 8)
	  | (((u) & 0x0000ff00u) << 8)
	  | (((u) & 0x000000ffu) << 24));
}
#endif
#ifdef L_bswapdi2
/* Reverse the byte order of the 64-bit value U.  */
DItype
__bswapdi2 (DItype u)
{
  return ((((u) & 0xff00000000000000ull) >> 56)
	  | (((u) & 0x00ff000000000000ull) >> 40)
	  | (((u) & 0x0000ff0000000000ull) >> 24)
	  | (((u) & 0x000000ff00000000ull) >> 8)
	  | (((u) & 0x00000000ff000000ull) << 8)
	  | (((u) & 0x0000000000ff0000ull) << 24)
	  | (((u) & 0x000000000000ff00ull) << 40)
	  | (((u) & 0x00000000000000ffull) << 56));
}
#endif
#ifdef L_ffssi2
#undef int
/* Return one plus the index of the least significant 1-bit of U,
   or 0 if U is zero.  */
int
__ffsSI2 (UWtype u)
{
  UWtype count;

  if (u == 0)
    return 0;

  count_trailing_zeros (count, u);
  return count + 1;
}
#endif
#ifdef L_ffsdi2
#undef int
/* Return one plus the index of the least significant 1-bit of the
   double-word U, or 0 if U is zero.  */
int
__ffsDI2 (DWtype u)
{
  const DWunion uu = {.ll = u};
  UWtype word, count, add;

  if (uu.s.low != 0)
    word = uu.s.low, add = 0;
  else if (uu.s.high != 0)
    word = uu.s.high, add = W_TYPE_SIZE;
  else
    return 0;

  count_trailing_zeros (count, word);
  return count + add + 1;
}
#endif
#ifdef L_muldi3
/* Double-word multiplication: the low x low product supplies the full
   double-word base, and the two cross products only affect the high
   word (the high x high product would overflow entirely).  */
DWtype
__muldi3 (DWtype u, DWtype v)
{
  const DWunion uu = {.ll = u};
  const DWunion vv = {.ll = v};
  DWunion w = {.ll = __umulsidi3 (uu.s.low, vv.s.low)};

  w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
	       + (UWtype) uu.s.high * (UWtype) vv.s.low);

  return w.ll;
}
#endif
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
#if defined (sdiv_qrnnd)
#define L_udiv_w_sdiv
#endif
#endif

#ifdef L_udiv_w_sdiv
#if defined (sdiv_qrnnd)
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
static inline __attribute__ ((__always_inline__))
#endif
/* Divide the two-word number A1:A0 by D using only the signed division
   primitive sdiv_qrnnd; store the remainder in *RP and return the
   quotient.  */
UWtype
__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
{
  UWtype q, r;
  UWtype c0, c1, b1;

  if ((Wtype) d >= 0)
    {
      if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
	{
	  /* Dividend, divisor, and quotient are nonnegative.  */
	  sdiv_qrnnd (q, r, a1, a0, d);
	}
      else
	{
	  /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d.  */
	  sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
	  /* Divide (c1*2^32 + c0) by d.  */
	  sdiv_qrnnd (q, r, c1, c0, d);
	  /* Add 2^31 to quotient.  */
	  q += (UWtype) 1 << (W_TYPE_SIZE - 1);
	}
    }
  else
    {
      b1 = d >> 1;			/* d/2, between 2^30 and 2^31 - 1 */
      c1 = a1 >> 1;			/* A/2 */
      c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);

      if (a1 < b1)			/* A < 2^32*b1, so A/2 < 2^31*b1 */
	{
	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

	  r = 2*r + (a0 & 1);		/* Remainder from A/(2*b1) */
	  if ((d & 1) != 0)
	    {
	      if (r >= q)
		r = r - q;
	      else if (q - r <= d)
		{
		  r = r - q + d;
		  q--;
		}
	      else
		{
		  r = r - q + 2*d;
		  q -= 2;
		}
	    }
	}
      else if (c1 < b1)			/* So 2^31 <= (A/2)/b1 < 2^32 */
	{
	  c1 = (b1 - 1) - c1;
	  c0 = ~c0;			/* logical NOT */

	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

	  q = ~q;			/* (A/2)/b1 */
	  r = (b1 - 1) - r;

	  r = 2*r + (a0 & 1);		/* A/(2*b1) */

	  if ((d & 1) != 0)
	    {
	      if (r >= q)
		r = r - q;
	      else if (q - r <= d)
		{
		  r = r - q + d;
		  q--;
		}
	      else
		{
		  r = r - q + 2*d;
		  q -= 2;
		}
	    }
	}
      else				/* Implies c1 = b1 */
	{				/* Hence a1 = d - 1 = 2*b1 - 1 */
	  if (a0 >= -d)
	    {
	      q = -1;
	      r = a0 + d;
	    }
	  else
	    {
	      q = -2;
	      r = a0 + 2*d;
	    }
	}
    }

  *rp = r;
  return q;
}
#else
/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv.  */
UWtype
__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
	       UWtype a1 __attribute__ ((__unused__)),
	       UWtype a0 __attribute__ ((__unused__)),
	       UWtype d __attribute__ ((__unused__)))
{
  return 0;
}
#endif
#endif
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3) || \
     defined (L_divmoddi4))
#define L_udivmoddi4
#endif
#ifdef L_clz
/* For each byte value, one plus the position of its highest set bit
   (0 for a zero byte); used by the count_leading_zeros fallback.  */
const UQItype __clz_tab[256] =
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
};
#endif
#ifdef L_clzsi2
#undef int
/* Return the number of leading zero bits in X (undefined for X == 0).  */
int
__clzSI2 (UWtype x)
{
  Wtype ret;

  count_leading_zeros (ret, x);

  return ret;
}
#endif
#ifdef L_clzdi2
#undef int
/* Return the number of leading zero bits in the double-word X
   (undefined for X == 0).  */
int
__clzDI2 (UDWtype x)
{
  const DWunion uu = {.ll = x};
  UWtype word;
  Wtype ret, add;

  if (uu.s.high)
    word = uu.s.high, add = 0;
  else
    word = uu.s.low, add = W_TYPE_SIZE;

  count_leading_zeros (ret, word);
  return ret + add;
}
#endif
#ifdef L_ctzsi2
#undef int
/* Return the number of trailing zero bits in X (undefined for X == 0).  */
int
__ctzSI2 (UWtype x)
{
  Wtype ret;

  count_trailing_zeros (ret, x);

  return ret;
}
#endif
#ifdef L_ctzdi2
#undef int
/* Return the number of trailing zero bits in the double-word X
   (undefined for X == 0).  */
int
__ctzDI2 (UDWtype x)
{
  const DWunion uu = {.ll = x};
  UWtype word;
  Wtype ret, add;

  if (uu.s.low)
    word = uu.s.low, add = 0;
  else
    word = uu.s.high, add = W_TYPE_SIZE;

  count_trailing_zeros (ret, word);
  return ret + add;
}
#endif
#ifdef L_clrsbsi2
#undef int
/* Return the number of leading redundant sign bits in X, i.e. the
   number of bits following the most significant bit that are identical
   to it.  */
int
__clrsbSI2 (Wtype x)
{
  Wtype ret;

  if (x < 0)
    x = ~x;
  if (x == 0)
    return W_TYPE_SIZE - 1;
  count_leading_zeros (ret, x);
  return ret - 1;
}
#endif
#ifdef L_clrsbdi2
#undef int
/* Return the number of leading redundant sign bits in the double-word X.  */
int
__clrsbDI2 (DWtype x)
{
  const DWunion uu = {.ll = x};
  UWtype word;
  Wtype ret, add;

  /* Pick the word holding the most significant non-sign bit; invert
     negative values so we can count leading zeros.  */
  if (uu.s.high == 0)
    word = uu.s.low, add = W_TYPE_SIZE;
  else if (uu.s.high == -1)
    word = ~uu.s.low, add = W_TYPE_SIZE;
  else if (uu.s.high >= 0)
    word = uu.s.high, add = 0;
  else
    word = ~uu.s.high, add = 0;

  if (word == 0)
    ret = W_TYPE_SIZE;
  else
    count_leading_zeros (ret, word);

  return ret + add - 1;
}
#endif
#ifdef L_popcount_tab
/* For each byte value, the number of 1-bits it contains; used by the
   table-lookup popcount fallback.  */
const UQItype __popcount_tab[256] =
{
  0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
  1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
  1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
  2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
  1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
  2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
  2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
  3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8
};
#endif
#if defined(L_popcountsi2) || defined(L_popcountdi2)
/* Replicate a byte constant into every byte of a UWtype.  */
#define POPCOUNTCST2(x) (((UWtype) x << __CHAR_BIT__) | x)
#define POPCOUNTCST4(x) (((UWtype) x << (2 * __CHAR_BIT__)) | x)
#define POPCOUNTCST8(x) (((UWtype) x << (4 * __CHAR_BIT__)) | x)
#if W_TYPE_SIZE == __CHAR_BIT__
#define POPCOUNTCST(x) x
#elif W_TYPE_SIZE == 2 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST2 (x)
#elif W_TYPE_SIZE == 4 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST4 (POPCOUNTCST2 (x))
#elif W_TYPE_SIZE == 8 * __CHAR_BIT__
#define POPCOUNTCST(x) POPCOUNTCST8 (POPCOUNTCST4 (POPCOUNTCST2 (x)))
#endif
#endif

#ifdef L_popcountsi2
#undef int
/* Return the number of 1-bits in X.  */
int
__popcountSI2 (UWtype x)
{
  /* Force table lookup on targets like AVR and RL78 which only
     pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
     have 1, and other small word targets.  */
#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
  /* Classic SWAR bit-count: pairwise sums, nibble sums, then a
     multiply to accumulate the byte sums into the top byte.  */
  x = x - ((x >> 1) & POPCOUNTCST (0x55));
  x = (x & POPCOUNTCST (0x33)) + ((x >> 2) & POPCOUNTCST (0x33));
  x = (x + (x >> 4)) & POPCOUNTCST (0x0F);
  return (x * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
#else
  int i, ret = 0;

  for (i = 0; i < W_TYPE_SIZE; i += 8)
    ret += __popcount_tab[(x >> i) & 0xff];

  return ret;
#endif
}
#endif
#ifdef L_popcountdi2
#undef int
/* Return the number of 1-bits in the double-word X.  */
int
__popcountDI2 (UDWtype x)
{
  /* Force table lookup on targets like AVR and RL78 which only
     pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
     have 1, and other small word targets.  */
#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
  /* SWAR bit-count on each word, summing the partial counts before
     the final multiply-accumulate.  */
  const DWunion uu = {.ll = x};
  UWtype x1 = uu.s.low, x2 = uu.s.high;
  x1 = x1 - ((x1 >> 1) & POPCOUNTCST (0x55));
  x2 = x2 - ((x2 >> 1) & POPCOUNTCST (0x55));
  x1 = (x1 & POPCOUNTCST (0x33)) + ((x1 >> 2) & POPCOUNTCST (0x33));
  x2 = (x2 & POPCOUNTCST (0x33)) + ((x2 >> 2) & POPCOUNTCST (0x33));
  x1 = (x1 + (x1 >> 4)) & POPCOUNTCST (0x0F);
  x2 = (x2 + (x2 >> 4)) & POPCOUNTCST (0x0F);
  x1 += x2;
  return (x1 * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
#else
  int i, ret = 0;

  for (i = 0; i < 2*W_TYPE_SIZE; i += 8)
    ret += __popcount_tab[(x >> i) & 0xff];

  return ret;
#endif
}
#endif
#ifdef L_paritysi2
#undef int
/* Return 1 if X has an odd number of 1-bits, else 0.  */
int
__paritySI2 (UWtype x)
{
#if W_TYPE_SIZE > 64
# error "fill out the table"
#endif
#if W_TYPE_SIZE > 32
  x ^= x >> 32;
#endif
#if W_TYPE_SIZE > 16
  x ^= x >> 16;
#endif
  x ^= x >> 8;
  x ^= x >> 4;
  x &= 0xf;
  /* 0x6996 is a 16-entry parity lookup table indexed by the nibble.  */
  return (0x6996 >> x) & 1;
}
#endif
#ifdef L_paritydi2
#undef int
/* Return 1 if the double-word X has an odd number of 1-bits, else 0.  */
int
__parityDI2 (UDWtype x)
{
  const DWunion uu = {.ll = x};
  UWtype nx = uu.s.low ^ uu.s.high;

#if W_TYPE_SIZE > 64
# error "fill out the table"
#endif
#if W_TYPE_SIZE > 32
  nx ^= nx >> 32;
#endif
#if W_TYPE_SIZE > 16
  nx ^= nx >> 16;
#endif
  nx ^= nx >> 8;
  nx ^= nx >> 4;
  nx &= 0xf;
  /* 0x6996 is a 16-entry parity lookup table indexed by the nibble.  */
  return (0x6996 >> nx) & 1;
}
#endif
#ifdef L_udivmoddi4
#ifdef TARGET_HAS_NO_HW_DIVIDE

#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3) || \
     defined (L_divmoddi4))
static inline __attribute__ ((__always_inline__))
#endif
/* Unsigned double-word division without hardware divide: return N / D
   and, if RP is non-null, store N % D in *RP.  */
UDWtype
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
  UDWtype q = 0, r = n, y = d;
  UWtype lz1, lz2, i, k;

  /* Implements align divisor shift dividend method.  This algorithm
     aligns the divisor under the dividend and then perform number of
     test-subtract iterations which shift the dividend left.  Number of
     iterations is k + 1 where k is the number of bit positions the
     divisor must be shifted left to align it under the dividend.
     quotient bits can be saved in the rightmost positions of the dividend
     as it shifts left on each test-subtract iteration.  */

  if (y <= r)
    {
      lz1 = __builtin_clzll (d);
      lz2 = __builtin_clzll (n);

      k = lz1 - lz2;
      y = (y << k);

      /* Dividend can exceed 2 ^ (width - 1) - 1 but still be less than the
	 aligned divisor.  Normal iteration can drops the high order bit
	 of the dividend.  Therefore, first test-subtract iteration is a
	 special case, saving its quotient bit in a separate location and
	 not shifting the dividend.  */
      if (r >= y)
	{
	  r = r - y;
	  q = (1ULL << k);
	}

      if (k > 0)
	{
	  y = y >> 1;

	  /* k additional iterations where k regular test subtract shift
	     dividend iterations are done.  */
	  i = k;
	  do
	    {
	      if (r >= y)
		r = ((r - y) << 1) + 1;
	      else
		r = (r << 1);
	      i = i - 1;
	    } while (i != 0);

	  /* First quotient bit is combined with the quotient bits resulting
	     from the k regular iterations.  */
	  q = q + r;
	  r = r >> k;
	  q = q - (r << k);
	}
    }

  if (rp)
    *rp = r;
  return q;
}
#else

#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3) || \
     defined (L_divmoddi4))
static inline __attribute__ ((__always_inline__))
#endif
/* Unsigned double-word division: return N / D and, if RP is non-null,
   store N % D in *RP.  Uses the word-level udiv_qrnnd primitive.  */
UDWtype
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
  const DWunion nn = {.ll = n};
  const DWunion dd = {.ll = d};
  DWunion rr;
  UWtype d0, d1, n0, n1, n2;
  UWtype q0, q1;
  UWtype b, bm;

  d0 = dd.s.low;
  d1 = dd.s.high;
  n0 = nn.s.low;
  n1 = nn.s.high;

#if !UDIV_NEEDS_NORMALIZATION
  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  udiv_qrnnd (q1, n1, 0, n1, d0);
	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }

#else /* UDIV_NEEDS_NORMALIZATION */

  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  count_leading_zeros (bm, d0);

	  if (bm != 0)
	    {
	      /* Normalize, i.e. make the most significant bit of the
		 denominator set.  */

	      d0 = d0 << bm;
	      n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
	      n0 = n0 << bm;
	    }

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0 >> bm.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  count_leading_zeros (bm, d0);

	  if (bm == 0)
	    {
	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 leading quotient digit q1 = 1).

		 This special case is necessary, not an optimization.
		 (Shifts counts of W_TYPE_SIZE are undefined.)  */

	      n1 -= d0;
	      q1 = 1;
	    }
	  else
	    {
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q1, n1, n2, n1, d0);
	    }

	  /* n1 != d0...  */

	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0 >> bm.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0 >> bm;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }
#endif /* UDIV_NEEDS_NORMALIZATION */

  else
    {
      if (d1 > n1)
	{
	  /* 00 = nn / DD */

	  q0 = 0;
	  q1 = 0;

	  /* Remainder in n1n0.  */
	  if (rp != 0)
	    {
	      rr.s.low = n0;
	      rr.s.high = n1;
	      *rp = rr.ll;
	    }
	}
      else
	{
	  /* 0q = NN / dd */

	  count_leading_zeros (bm, d1);
	  if (bm == 0)
	    {
	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 quotient digit q0 = 0 or 1).

		 This special case is necessary, not an optimization.  */

	      /* The condition on the next line takes advantage of that
		 n1 >= d1 (true due to program flow).  */
	      if (n1 > d1 || n0 >= d0)
		{
		  q0 = 1;
		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
		}
	      else
		q0 = 0;

	      q1 = 0;

	      if (rp != 0)
		{
		  rr.s.low = n0;
		  rr.s.high = n1;
		  *rp = rr.ll;
		}
	    }
	  else
	    {
	      UWtype m1, m0;
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d1 = (d1 << bm) | (d0 >> b);
	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q0, n1, n2, n1, d1);
	      umul_ppmm (m1, m0, q0, d0);

	      if (m1 > n1 || (m1 == n1 && m0 > n0))
		{
		  q0--;
		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
		}

	      q1 = 0;

	      /* Remainder in (n1n0 - m1m0) >> bm.  */
	      if (rp != 0)
		{
		  sub_ddmmss (n1, n0, n1, n0, m1, m0);
		  rr.s.low = (n1 << b) | (n0 >> bm);
		  rr.s.high = n1 >> bm;
		  *rp = rr.ll;
		}
	    }
	}
    }

  const DWunion ww = {{.low = q0, .high = q1}};
  return ww.ll;
}
#endif
#endif
#ifdef L_divdi3
/* Signed double-word division: return U / V, truncating toward zero.
   Reduces to unsigned division of the absolute values, fixing up the
   sign afterwards.  */
DWtype
__divdi3 (DWtype u, DWtype v)
{
  Wtype c = 0;
  DWunion uu = {.ll = u};
  DWunion vv = {.ll = v};
  DWtype w;

  if (uu.s.high < 0)
    c = ~c,
    uu.ll = -uu.ll;
  if (vv.s.high < 0)
    c = ~c,
    vv.ll = -vv.ll;

  w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
  if (c)
    w = -w;

  return w;
}
#endif
#ifdef L_moddi3
/* Signed double-word remainder: return U % V; the result takes the
   sign of the dividend U.  */
DWtype
__moddi3 (DWtype u, DWtype v)
{
  Wtype c = 0;
  DWunion uu = {.ll = u};
  DWunion vv = {.ll = v};
  DWtype w;

  if (uu.s.high < 0)
    c = ~c,
    uu.ll = -uu.ll;
  if (vv.s.high < 0)
    vv.ll = -vv.ll;

  (void) __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&w);
  if (c)
    w = -w;

  return w;
}
#endif
#ifdef L_divmoddi4
/* Signed double-word combined division: return U / V and store U % V
   in *RP.  C1 tracks the quotient sign, C2 the remainder sign (which
   follows the dividend).  */
DWtype
__divmoddi4 (DWtype u, DWtype v, DWtype *rp)
{
  Wtype c1 = 0, c2 = 0;
  DWunion uu = {.ll = u};
  DWunion vv = {.ll = v};
  DWtype w;
  DWtype r;

  if (uu.s.high < 0)
    c1 = ~c1, c2 = ~c2,
    uu.ll = -uu.ll;
  if (vv.s.high < 0)
    c1 = ~c1,
    vv.ll = -vv.ll;

  w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&r);
  if (c1)
    w = -w;
  if (c2)
    r = -r;

  *rp = r;
  return w;
}
#endif
#ifdef L_umoddi3
/* Unsigned double-word remainder: return U % V.  */
UDWtype
__umoddi3 (UDWtype u, UDWtype v)
{
  UDWtype w;

  (void) __udivmoddi4 (u, v, &w);

  return w;
}
#endif
#ifdef L_udivdi3
/* Unsigned double-word division: return N / D.  */
UDWtype
__udivdi3 (UDWtype n, UDWtype d)
{
  return __udivmoddi4 (n, d, (UDWtype *) 0);
}
#endif
1304 #if (defined(__BITINT_MAXWIDTH__) \
1305 && (defined(L_mulbitint3) || defined(L_divmodbitint4)))
1306 /* _BitInt support. */
1308 /* If *P is zero or sign extended (the latter only for PREC < 0) from
1309 some narrower _BitInt value, reduce precision. */
1311 static inline __attribute__((__always_inline__)) SItype
1312 bitint_reduce_prec (const UBILtype **p, SItype prec)
1314 UWtype mslimb;
1315 SItype i;
1316 if (prec < 0)
1318 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1319 i = 0;
1320 #else
1321 i = ((USItype) -1 - prec) / W_TYPE_SIZE;
1322 #endif
1323 mslimb = (*p)[i];
1324 if (mslimb & ((UWtype) 1 << (((USItype) -1 - prec) % W_TYPE_SIZE)))
1326 SItype n = ((USItype) -prec) % W_TYPE_SIZE;
1327 if (n)
1329 mslimb |= ((UWtype) -1 << (((USItype) -1 - prec) % W_TYPE_SIZE));
1330 if (mslimb == (UWtype) -1)
1332 prec += n;
1333 if (prec >= -1)
1334 return -2;
1335 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1336 ++p;
1337 #else
1338 --i;
1339 #endif
1340 mslimb = (*p)[i];
1341 n = 0;
1344 while (mslimb == (UWtype) -1)
1346 prec += W_TYPE_SIZE;
1347 if (prec >= -1)
1348 return -2;
1349 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1350 ++p;
1351 #else
1352 --i;
1353 #endif
1354 mslimb = (*p)[i];
1356 if (n == 0)
1358 if ((Wtype) mslimb >= 0)
1360 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1361 --p;
1362 #endif
1363 return prec - 1;
1366 return prec;
1368 else
1369 prec = -prec;
1371 else
1373 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1374 i = 0;
1375 #else
1376 i = ((USItype) prec - 1) / W_TYPE_SIZE;
1377 #endif
1378 mslimb = (*p)[i];
1380 SItype n = ((USItype) prec) % W_TYPE_SIZE;
1381 if (n)
1383 mslimb &= ((UWtype) 1 << (((USItype) prec) % W_TYPE_SIZE)) - 1;
1384 if (mslimb == 0)
1386 prec -= n;
1387 if (prec == 0)
1388 return 1;
1389 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1390 ++p;
1391 #else
1392 --i;
1393 #endif
1394 mslimb = (*p)[i];
1397 while (mslimb == 0)
1399 prec -= W_TYPE_SIZE;
1400 if (prec == 0)
1401 return 1;
1402 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1403 ++p;
1404 #else
1405 --i;
1406 #endif
1407 mslimb = (*p)[i];
1409 return prec;
1412 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1413 # define BITINT_INC -1
1414 # define BITINT_END(be, le) (be)
1415 #else
1416 # define BITINT_INC 1
1417 # define BITINT_END(be, le) (le)
1418 #endif
1420 #ifdef L_mulbitint3
1421 /* D = S * L. */
1423 static UWtype
1424 bitint_mul_1 (UBILtype *d, const UBILtype *s, UWtype l, SItype n)
1426 UWtype sv, hi, lo, c = 0;
1429 sv = *s;
1430 s += BITINT_INC;
1431 umul_ppmm (hi, lo, sv, l);
1432 c = __builtin_add_overflow (lo, c, &lo) + hi;
1433 *d = lo;
1434 d += BITINT_INC;
1436 while (--n);
1437 return c;
1440 /* D += S * L. */
1442 static UWtype
1443 bitint_addmul_1 (UBILtype *d, const UBILtype *s, UWtype l, SItype n)
1445 UWtype sv, hi, lo, c = 0;
1448 sv = *s;
1449 s += BITINT_INC;
1450 umul_ppmm (hi, lo, sv, l);
1451 hi += __builtin_add_overflow (lo, *d, &lo);
1452 c = __builtin_add_overflow (lo, c, &lo) + hi;
1453 *d = lo;
1454 d += BITINT_INC;
1456 while (--n);
1457 return c;
1460 /* If XPREC is positive, it is precision in bits
1461 of an unsigned _BitInt operand (which has XPREC/W_TYPE_SIZE
1462 full limbs and if Xprec%W_TYPE_SIZE one partial limb.
1463 If Xprec is negative, -XPREC is precision in bits
1464 of a signed _BitInt operand. RETPREC should be always
1465 positive. */
1467 void
1468 __mulbitint3 (UBILtype *ret, SItype retprec,
1469 const UBILtype *u, SItype uprec,
1470 const UBILtype *v, SItype vprec)
1472 uprec = bitint_reduce_prec (&u, uprec);
1473 vprec = bitint_reduce_prec (&v, vprec);
1474 USItype auprec = uprec < 0 ? -uprec : uprec;
1475 USItype avprec = vprec < 0 ? -vprec : vprec;
1477 /* Prefer non-negative U.
1478 Otherwise make sure V doesn't have higher precision than U. */
1479 if ((uprec < 0 && vprec >= 0)
1480 || (avprec > auprec && !(uprec >= 0 && vprec < 0)))
1482 SItype p;
1483 const UBILtype *t;
1484 p = uprec; uprec = vprec; vprec = p;
1485 p = auprec; auprec = avprec; avprec = p;
1486 t = u; u = v; v = t;
1489 USItype un = auprec / W_TYPE_SIZE;
1490 USItype un2 = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1491 USItype vn = avprec / W_TYPE_SIZE;
1492 USItype vn2 = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1493 USItype retn = ((USItype) retprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1494 USItype retidx, uidx, vidx;
1495 UWtype vv;
1496 /* Indexes of least significant limb. */
1497 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1498 retidx = retn - 1;
1499 uidx = un2 - 1;
1500 vidx = vn2 - 1;
1501 #else
1502 retidx = 0;
1503 uidx = 0;
1504 vidx = 0;
1505 #endif
1506 if (__builtin_expect (auprec <= W_TYPE_SIZE, 0) && vprec < 0)
1508 UWtype uu = u[uidx];
1509 if (__builtin_expect (auprec < W_TYPE_SIZE, 0))
1510 uu &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
1511 if (uu == 0)
1513 /* 0 * negative would be otherwise mishandled below, so
1514 handle it specially. */
1515 __builtin_memset (ret, 0, retn * sizeof (UWtype));
1516 return;
1519 vv = v[vidx];
1520 if (__builtin_expect (avprec < W_TYPE_SIZE, 0))
1522 if (vprec > 0)
1523 vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
1524 else
1525 vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
1528 USItype n = un > retn ? retn : un;
1529 USItype n2 = n;
1530 USItype retidx2 = retidx + n * BITINT_INC;
1531 UWtype c = 0, uv = 0;
1532 if (n)
1533 c = bitint_mul_1 (ret + retidx, u + uidx, vv, n);
1534 if (retn > un && un2 != un)
1536 UWtype hi, lo;
1537 uv = u[uidx + n * BITINT_INC];
1538 if (uprec > 0)
1539 uv &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
1540 else
1541 uv |= (UWtype) -1 << (auprec % W_TYPE_SIZE);
1542 umul_ppmm (hi, lo, uv, vv);
1543 c = __builtin_add_overflow (lo, c, &lo) + hi;
1544 ret[retidx2] = lo;
1545 retidx2 += BITINT_INC;
1546 ++n2;
1548 if (retn > un2)
1550 if (uprec < 0)
1552 while (n2 < retn)
1554 if (n2 >= un2 + vn2)
1555 break;
1556 UWtype hi, lo;
1557 umul_ppmm (hi, lo, (UWtype) -1, vv);
1558 c = __builtin_add_overflow (lo, c, &lo) + hi;
1559 ret[retidx2] = lo;
1560 retidx2 += BITINT_INC;
1561 ++n2;
1564 else
1566 ret[retidx2] = c;
1567 retidx2 += BITINT_INC;
1568 ++n2;
1570 /* If RET has more limbs than U after precision reduction,
1571 fill in the remaining limbs. */
1572 while (n2 < retn)
1574 if (n2 < un2 + vn2 || (uprec ^ vprec) >= 0)
1575 c = 0;
1576 else
1577 c = (UWtype) -1;
1578 ret[retidx2] = c;
1579 retidx2 += BITINT_INC;
1580 ++n2;
1583 /* N is now number of possibly non-zero limbs in RET (ignoring
1584 limbs above UN2 + VN2 which if any have been finalized already). */
1585 USItype end = vprec < 0 ? un2 + vn2 : vn2;
1586 if (retn > un2 + vn2) retn = un2 + vn2;
1587 if (end > retn) end = retn;
1588 for (USItype m = 1; m < end; ++m)
1590 retidx += BITINT_INC;
1591 vidx += BITINT_INC;
1592 if (m < vn2)
1594 vv = v[vidx];
1595 if (__builtin_expect (m == vn, 0))
1597 if (vprec > 0)
1598 vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
1599 else
1600 vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
1603 else
1604 vv = (UWtype) -1;
1605 if (m + n > retn)
1606 n = retn - m;
1607 c = 0;
1608 if (n)
1609 c = bitint_addmul_1 (ret + retidx, u + uidx, vv, n);
1610 n2 = m + n;
1611 retidx2 = retidx + n * BITINT_INC;
1612 if (n2 < retn && un2 != un)
1614 UWtype hi, lo;
1615 umul_ppmm (hi, lo, uv, vv);
1616 hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
1617 c = __builtin_add_overflow (lo, c, &lo) + hi;
1618 ret[retidx2] = lo;
1619 retidx2 += BITINT_INC;
1620 ++n2;
1622 if (uprec < 0)
1623 while (n2 < retn)
1625 UWtype hi, lo;
1626 umul_ppmm (hi, lo, (UWtype) -1, vv);
1627 hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
1628 c = __builtin_add_overflow (lo, c, &lo) + hi;
1629 ret[retidx2] = lo;
1630 retidx2 += BITINT_INC;
1631 ++n2;
1633 else if (n2 < retn)
1635 ret[retidx2] = c;
1636 retidx2 += BITINT_INC;
1640 #endif
1642 #ifdef L_divmodbitint4
1643 /* D = -S. */
1645 static void
1646 bitint_negate (UBILtype *d, const UBILtype *s, SItype n)
1648 UWtype c = 1;
1651 UWtype sv = *s, lo;
1652 s += BITINT_INC;
1653 c = __builtin_add_overflow (~sv, c, &lo);
1654 *d = lo;
1655 d += BITINT_INC;
1657 while (--n);
1660 /* D -= S * L. */
1662 static UWtype
1663 bitint_submul_1 (UBILtype *d, const UBILtype *s, UWtype l, SItype n)
1665 UWtype sv, hi, lo, c = 0;
1668 sv = *s;
1669 s += BITINT_INC;
1670 umul_ppmm (hi, lo, sv, l);
1671 hi += __builtin_sub_overflow (*d, lo, &lo);
1672 c = __builtin_sub_overflow (lo, c, &lo) + hi;
1673 *d = lo;
1674 d += BITINT_INC;
1676 while (--n);
1677 return c;
1680 /* If XPREC is positive, it is precision in bits
1681 of an unsigned _BitInt operand (which has XPREC/W_TYPE_SIZE
1682 full limbs and if Xprec%W_TYPE_SIZE one partial limb.
1683 If Xprec is negative, -XPREC is precision in bits
1684 of a signed _BitInt operand. QPREC and RPREC should be
1685 always non-negative. If either Q or R is NULL (at least
1686 one should be non-NULL), then corresponding QPREC or RPREC
1687 should be 0. */
/* Signed _BitInt division: computes Q = U / V and R = U % V using
   Knuth's Algorithm D on the absolute values, then fixes up signs.  */
1689 void
1690 __divmodbitint4 (UBILtype *q, SItype qprec,
1691 UBILtype *r, SItype rprec,
1692 const UBILtype *u, SItype uprec,
1693 const UBILtype *v, SItype vprec)
/* Drop redundant high limbs from both operands; the returned precision
   stays signed so it still encodes each operand's signedness.  */
1695 uprec = bitint_reduce_prec (&u, uprec);
1696 vprec = bitint_reduce_prec (&v, vprec);
1697 USItype auprec = uprec < 0 ? -uprec : uprec;
1698 USItype avprec = vprec < 0 ? -vprec : vprec;
/* Limb counts of |u|, |v|, Q and R; UP/VP are the bit counts of the
   partial top limbs (0 when the top limb is full).  */
1699 USItype un = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1700 USItype vn = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1701 USItype qn = ((USItype) qprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1702 USItype rn = ((USItype) rprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1703 USItype up = auprec % W_TYPE_SIZE;
1704 USItype vp = avprec % W_TYPE_SIZE;
1705 if (__builtin_expect (un < vn, 0))
1707 /* If abs(v) > abs(u), then q is 0 and r is u. */
1708 if (q)
1709 __builtin_memset (q, 0, qn * sizeof (UWtype));
1710 if (r == NULL)
1711 return;
1712 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1713 r += rn - 1;
1714 u += un - 1;
1715 #endif
1716 if (up)
1717 --un;
1718 if (rn < un)
1719 un = rn;
1720 for (rn -= un; un; --un)
1722 *r = *u;
1723 r += BITINT_INC;
1724 u += BITINT_INC;
1726 if (!rn)
1727 return;
1728 if (up)
/* Copy U's partial top limb, masking or sign-filling the unused bits
   according to U's signedness.  */
1730 if (uprec > 0)
1731 *r = *u & (((UWtype) 1 << up) - 1);
1732 else
1733 *r = *u | ((UWtype) -1 << up);
1734 r += BITINT_INC;
1735 if (!--rn)
1736 return;
1738 UWtype c = uprec < 0 ? (UWtype) -1 : (UWtype) 0;
1739 for (; rn; --rn)
1741 *r = c;
1742 r += BITINT_INC;
1744 return;
/* General case.  One alloca block holds: |u| plus one extra limb for
   the normalization overflow (u2), then |v| (v2), then scratch for the
   quotient (q2) only when Q is absent or narrower than un-vn+1 limbs.  */
1746 USItype qn2 = un - vn + 1;
1747 if (qn >= qn2)
1748 qn2 = 0;
1749 USItype sz = un + 1 + vn + qn2;
1750 UBILtype *buf = __builtin_alloca (sz * sizeof (UWtype));
1751 USItype uidx, vidx;
1752 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1753 uidx = un - 1;
1754 vidx = vn - 1;
1755 #else
1756 uidx = 0;
1757 vidx = 0;
1758 #endif
/* Negative operands are negated into the scratch area, so the core
   division below only ever sees non-negative values.  */
1759 if (uprec < 0)
1760 bitint_negate (buf + BITINT_END (uidx + 1, 0), u + uidx, un);
1761 else
1762 __builtin_memcpy (buf + BITINT_END (1, 0), u, un * sizeof (UWtype));
1763 if (up)
1764 buf[BITINT_END (1, un - 1)] &= (((UWtype) 1 << up) - 1);
1765 if (vprec < 0)
1766 bitint_negate (buf + un + 1 + vidx, v + vidx, vn);
1767 else
1768 __builtin_memcpy (buf + un + 1, v, vn * sizeof (UWtype));
1769 if (vp)
1770 buf[un + 1 + BITINT_END (0, vn - 1)] &= (((UWtype) 1 << vp) - 1);
1771 UBILtype *u2 = buf;
1772 UBILtype *v2 = u2 + un + 1;
1773 UBILtype *q2 = v2 + vn;
1774 if (!qn2)
1775 q2 = q + BITINT_END (qn - (un - vn + 1), 0);
1777 /* Knuth's algorithm. See also ../gcc/wide-int.cc (divmod_internal_2). */
1779 #ifndef UDIV_NEEDS_NORMALIZATION
1780 /* Handle single limb divisor first. */
1781 if (vn == 1)
1783 UWtype vv = v2[0];
1784 if (vv == 0)
1785 vv = 1 / vv; /* Divide intentionally by zero. */
1786 UWtype k = 0;
1787 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1788 for (SItype i = 0; i <= un - 1; ++i)
1789 #else
1790 for (SItype i = un - 1; i >= 0; --i)
1791 #endif
1792 udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
1793 if (r != NULL)
1794 r[BITINT_END (rn - 1, 0)] = k;
1796 else
1797 #endif
1799 SItype s;
1800 #ifdef UDIV_NEEDS_NORMALIZATION
1801 if (vn == 1 && v2[0] == 0)
1802 s = 0;
1803 else
1804 #endif
1805 if (sizeof (0U) == sizeof (UWtype))
1806 s = __builtin_clz (v2[BITINT_END (0, vn - 1)]);
1807 else if (sizeof (0UL) == sizeof (UWtype))
1808 s = __builtin_clzl (v2[BITINT_END (0, vn - 1)]);
1809 else
1810 s = __builtin_clzll (v2[BITINT_END (0, vn - 1)]);
1811 if (s)
1813 /* Normalize by shifting v2 left so that it has msb set. */
1814 const SItype n = sizeof (UWtype) * __CHAR_BIT__;
1815 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1816 for (SItype i = 0; i < vn - 1; ++i)
1817 #else
1818 for (SItype i = vn - 1; i > 0; --i)
1819 #endif
1820 v2[i] = (v2[i] << s) | (v2[i - BITINT_INC] >> (n - s));
1821 v2[vidx] = v2[vidx] << s;
1822 /* And shift u2 left by the same amount. */
1823 u2[BITINT_END (0, un)] = u2[BITINT_END (1, un - 1)] >> (n - s);
1824 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1825 for (SItype i = 1; i < un; ++i)
1826 #else
1827 for (SItype i = un - 1; i > 0; --i)
1828 #endif
1829 u2[i] = (u2[i] << s) | (u2[i - BITINT_INC] >> (n - s));
1830 u2[BITINT_END (un, 0)] = u2[BITINT_END (un, 0)] << s;
1832 else
1833 u2[BITINT_END (0, un)] = 0;
1834 #ifdef UDIV_NEEDS_NORMALIZATION
1835 /* Handle single limb divisor first. */
1836 if (vn == 1)
1838 UWtype vv = v2[0];
1839 if (vv == 0)
1840 vv = 1 / vv; /* Divide intentionally by zero. */
1841 UWtype k = u2[BITINT_END (0, un)];
1842 #if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1843 for (SItype i = 0; i <= un - 1; ++i)
1844 #else
1845 for (SItype i = un - 1; i >= 0; --i)
1846 #endif
1847 udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
1848 if (r != NULL)
1849 r[BITINT_END (rn - 1, 0)] = k >> s;
1851 else
1852 #endif
/* Multi-limb divisor: vv1/vv0 are the two most significant divisor
   limbs (vv1 has its msb set after normalization).  */
1854 UWtype vv1 = v2[BITINT_END (0, vn - 1)];
1855 UWtype vv0 = v2[BITINT_END (1, vn - 2)];
1856 /* Main loop. */
1857 for (SItype j = un - vn; j >= 0; --j)
1859 /* Compute estimate in qhat. */
1860 UWtype uv1 = u2[BITINT_END (un - j - vn, j + vn)];
1861 UWtype uv0 = u2[BITINT_END (un - j - vn + 1, j + vn - 1)];
1862 UWtype qhat, rhat, hi, lo, c;
1863 if (uv1 >= vv1)
1865 /* udiv_qrnnd doesn't support quotients which don't
1866 fit into UWtype, while Knuth's algorithm originally
1867 uses a double-word by word to double-word division.
1868 Fortunately, the algorithm guarantees that uv1 <= vv1,
1869 because if uv1 > vv1, then even if v would have all
1870 bits in all words below vv1 set, the previous iteration
1871 would be supposed to use qhat larger by 1 and subtract
1872 v. With uv1 == vv1 and uv0 >= vv1 the double-word
1873 qhat in Knuth's algorithm would be 1 in the upper word
1874 and 1 in the lower word, say for
1875 uv1 0x8000000000000000ULL
1876 uv0 0xffffffffffffffffULL
1877 vv1 0x8000000000000000ULL
1878 0x8000000000000000ffffffffffffffffuwb
1879 / 0x8000000000000000uwb == 0x10000000000000001uwb, and
1880 exactly like that also for any other value
1881 > 0x8000000000000000ULL in uv1 and vv1 and uv0 >= uv1.
1882 So we need to subtract one or at most two vv1s from
1883 uv1:uv0 (qhat because of that decreases by 1 or 2 and
1884 is then representable in UWtype) and need to increase
1885 rhat by vv1 once or twice because of that. Now, if
1886 we need to subtract 2 vv1s, i.e. if
1887 uv1 == vv1 && uv0 >= vv1, then rhat (which is uv0 - vv1)
1888 + vv1 computation can't overflow, because it is equal
1889 to uv0 and therefore the original algorithm in that case
1890 performs goto again, but the second vv1 addition must
1891 overflow already because vv1 has msb set from the
1892 canonicalization. */
1893 uv1 -= __builtin_sub_overflow (uv0, vv1, &uv0);
1894 if (uv1 >= vv1)
1896 uv1 -= __builtin_sub_overflow (uv0, vv1, &uv0);
1897 udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
1898 rhat += 2 * vv1;
1900 else
1902 udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
1903 if (!__builtin_add_overflow (rhat, vv1, &rhat))
1904 goto again;
1907 else
1909 udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
1910 again:
/* Refine the estimate: while qhat*vv0 exceeds the corresponding
   two-limb remainder prefix, qhat is one too large.  */
1911 umul_ppmm (hi, lo, qhat, vv0);
1912 if (hi > rhat
1913 || (hi == rhat
1914 && lo > u2[BITINT_END (un - j - vn + 2,
1915 j + vn - 2)]))
1917 --qhat;
1918 if (!__builtin_add_overflow (rhat, vv1, &rhat))
1919 goto again;
1923 c = bitint_submul_1 (u2 + BITINT_END (un - j, j),
1924 v2 + BITINT_END (vn - 1, 0), qhat, vn);
1925 u2[BITINT_END (un - j - vn, j + vn)] -= c;
1926 /* If we've subtracted too much, decrease qhat and
1927 and add back. */
1928 if ((Wtype) u2[BITINT_END (un - j - vn, j + vn)] < 0)
1930 --qhat;
1931 c = 0;
1932 for (USItype i = 0; i < vn; ++i)
1934 UWtype s = v2[BITINT_END (vn - 1 - i, i)];
1935 UWtype d = u2[BITINT_END (un - i - j, i + j)];
1936 UWtype c1 = __builtin_add_overflow (d, s, &d);
1937 UWtype c2 = __builtin_add_overflow (d, c, &d);
1938 c = c1 + c2;
1939 u2[BITINT_END (un - i - j, i + j)] = d;
1941 u2[BITINT_END (un - j - vn, j + vn)] += c;
1943 q2[BITINT_END (un - vn - j, j)] = qhat;
/* Copy the (non-negative, still normalized) remainder out of u2 into
   R, shifting right by s to undo the normalization.  */
1945 if (r != NULL)
1947 if (s)
1949 const SItype n = sizeof (UWtype) * __CHAR_BIT__;
1950 /* Unnormalize remainder. */
1951 USItype i;
1952 for (i = 0; i < vn && i < rn; ++i)
1953 r[BITINT_END (rn - 1 - i, i)]
1954 = ((u2[BITINT_END (un - i, i)] >> s)
1955 | (u2[BITINT_END (un - i - 1, i + 1)] << (n - s)));
1956 if (i < rn)
1957 r[BITINT_END (rn - vn, vn - 1)]
1958 = u2[BITINT_END (un - vn + 1, vn - 1)] >> s;
1960 else if (rn > vn)
1961 __builtin_memcpy (&r[BITINT_END (rn - vn, 0)],
1962 &u2[BITINT_END (un + 1 - vn, 0)],
1963 vn * sizeof (UWtype));
1964 else
1965 __builtin_memcpy (&r[0], &u2[BITINT_END (un + 1 - rn, 0)],
1966 rn * sizeof (UWtype));
/* Apply result signs: the quotient is negative iff exactly one operand
   was negative; the remainder takes the sign of U.  Sign-extend into
   any extra limbs of Q/R beyond the computed width.  */
1970 if (q != NULL)
1972 if ((uprec < 0) ^ (vprec < 0))
1974 /* Negative quotient. */
1975 USItype n;
1976 if (un - vn + 1 > qn)
1977 n = qn;
1978 else
1979 n = un - vn + 1;
1980 bitint_negate (q + BITINT_END (qn - 1, 0),
1981 q2 + BITINT_END (un - vn, 0), n);
1982 if (qn > n)
1983 __builtin_memset (q + BITINT_END (0, n), -1,
1984 (qn - n) * sizeof (UWtype));
1986 else
1988 /* Positive quotient. */
1989 if (qn2)
1990 __builtin_memcpy (q, q2 + BITINT_END (un - vn + 1 - qn, 0),
1991 qn * sizeof (UWtype));
1992 else if (qn > un - vn + 1)
1993 __builtin_memset (q + BITINT_END (0, un - vn + 1), 0,
1994 (qn - (un - vn + 1)) * sizeof (UWtype));
1997 if (r != NULL)
1999 if (uprec < 0)
2001 /* Negative remainder. */
2002 bitint_negate (r + BITINT_END (rn - 1, 0),
2003 r + BITINT_END (rn - 1, 0),
2004 rn > vn ? vn : rn);
2005 if (rn > vn)
2006 __builtin_memset (r + BITINT_END (0, vn), -1,
2007 (rn - vn) * sizeof (UWtype));
2009 else
2011 /* Positive remainder. */
2012 if (rn > vn)
2013 __builtin_memset (r + BITINT_END (0, vn), 0,
2014 (rn - vn) * sizeof (UWtype));
2018 #endif
2019 #endif
2021 #ifdef L_cmpdi2
2022 cmp_return_type
2023 __cmpdi2 (DWtype a, DWtype b)
2025 return (a > b) - (a < b) + 1;
2027 #endif
2029 #ifdef L_ucmpdi2
2030 cmp_return_type
2031 __ucmpdi2 (UDWtype a, UDWtype b)
2033 return (a > b) - (a < b) + 1;
2035 #endif
2037 #if defined(L_fixunstfdi) && LIBGCC2_HAS_TF_MODE
2038 UDWtype
2039 __fixunstfDI (TFtype a)
2041 if (a < 0)
2042 return 0;
2044 /* Compute high word of result, as a flonum. */
2045 const TFtype b = (a / Wtype_MAXp1_F);
2046 /* Convert that to fixed (but not to DWtype!),
2047 and shift it into the high word. */
2048 UDWtype v = (UWtype) b;
2049 v <<= W_TYPE_SIZE;
2050 /* Remove high part from the TFtype, leaving the low part as flonum. */
2051 a -= (TFtype)v;
2052 /* Convert that to fixed (but not to DWtype!) and add it in.
2053 Sometimes A comes out negative. This is significant, since
2054 A has more bits than a long int does. */
2055 if (a < 0)
2056 v -= (UWtype) (- a);
2057 else
2058 v += (UWtype) a;
2059 return v;
2061 #endif
2063 #if defined(L_fixtfdi) && LIBGCC2_HAS_TF_MODE
2064 DWtype
2065 __fixtfdi (TFtype a)
2067 if (a < 0)
2068 return - __fixunstfDI (-a);
2069 return __fixunstfDI (a);
2071 #endif
2073 #if defined(L_fixunsxfdi) && LIBGCC2_HAS_XF_MODE
2074 UDWtype
2075 __fixunsxfDI (XFtype a)
2077 if (a < 0)
2078 return 0;
2080 /* Compute high word of result, as a flonum. */
2081 const XFtype b = (a / Wtype_MAXp1_F);
2082 /* Convert that to fixed (but not to DWtype!),
2083 and shift it into the high word. */
2084 UDWtype v = (UWtype) b;
2085 v <<= W_TYPE_SIZE;
2086 /* Remove high part from the XFtype, leaving the low part as flonum. */
2087 a -= (XFtype)v;
2088 /* Convert that to fixed (but not to DWtype!) and add it in.
2089 Sometimes A comes out negative. This is significant, since
2090 A has more bits than a long int does. */
2091 if (a < 0)
2092 v -= (UWtype) (- a);
2093 else
2094 v += (UWtype) a;
2095 return v;
2097 #endif
2099 #if defined(L_fixxfdi) && LIBGCC2_HAS_XF_MODE
2100 DWtype
2101 __fixxfdi (XFtype a)
2103 if (a < 0)
2104 return - __fixunsxfDI (-a);
2105 return __fixunsxfDI (a);
2107 #endif
2109 #if defined(L_fixunsdfdi) && LIBGCC2_HAS_DF_MODE
2110 UDWtype
2111 __fixunsdfDI (DFtype a)
2113 /* Get high part of result. The division here will just moves the radix
2114 point and will not cause any rounding. Then the conversion to integral
2115 type chops result as desired. */
2116 const UWtype hi = a / Wtype_MAXp1_F;
2118 /* Get low part of result. Convert `hi' to floating type and scale it back,
2119 then subtract this from the number being converted. This leaves the low
2120 part. Convert that to integral type. */
2121 const UWtype lo = a - (DFtype) hi * Wtype_MAXp1_F;
2123 /* Assemble result from the two parts. */
2124 return ((UDWtype) hi << W_TYPE_SIZE) | lo;
2126 #endif
2128 #if defined(L_fixdfdi) && LIBGCC2_HAS_DF_MODE
2129 DWtype
2130 __fixdfdi (DFtype a)
2132 if (a < 0)
2133 return - __fixunsdfDI (-a);
2134 return __fixunsdfDI (a);
2136 #endif
2138 #if defined(L_fixunssfdi) && LIBGCC2_HAS_SF_MODE
/* Convert the SFtype value A to an unsigned double-word integer.
   Three strategies, chosen at preprocessor time: widen to DFtype when
   available; otherwise a manual frexpf-style extraction when the SF
   mantissa is narrower than a word.  */
2139 UDWtype
2140 __fixunssfDI (SFtype a)
2142 #if LIBGCC2_HAS_DF_MODE
2143 /* Convert the SFtype to a DFtype, because that is surely not going
2144 to lose any bits. Some day someone else can write a faster version
2145 that avoids converting to DFtype, and verify it really works right. */
2146 const DFtype dfa = a;
2148 /* Get high part of result. The division here just moves the radix
2149 point and will not cause any rounding. Then the conversion to integral
2150 type chops result as desired. */
2151 const UWtype hi = dfa / Wtype_MAXp1_F;
2153 /* Get low part of result. Convert `hi' to floating type and scale it back,
2154 then subtract this from the number being converted. This leaves the low
2155 part. Convert that to integral type. */
2156 const UWtype lo = dfa - (DFtype) hi * Wtype_MAXp1_F;
2158 /* Assemble result from the two parts. */
2159 return ((UDWtype) hi << W_TYPE_SIZE) | lo;
2160 #elif FLT_MANT_DIG < W_TYPE_SIZE
2161 if (a < 1)
2162 return 0;
2163 if (a < Wtype_MAXp1_F)
2164 return (UWtype)a;
2165 if (a < Wtype_MAXp1_F * Wtype_MAXp1_F)
2167 /* Since we know that there are fewer significant bits in the SFmode
2168 quantity than in a word, we know that we can convert out all the
2169 significant bits in one step, and thus avoid losing bits. */
2171 /* ??? This following loop essentially performs frexpf. If we could
2172 use the real libm function, or poke at the actual bits of the fp
2173 format, it would be significantly faster. */
2175 UWtype shift = 0, counter;
2176 SFtype msb;
/* Binary-search the exponent: after the loop, SHIFT holds the power
   of two by which A (already divided by Wtype_MAXp1_F) exceeds 1.  */
2178 a /= Wtype_MAXp1_F;
2179 for (counter = W_TYPE_SIZE / 2; counter != 0; counter >>= 1)
2181 SFtype counterf = (UWtype)1 << counter;
2182 if (a >= counterf)
2184 shift |= counter;
2185 a /= counterf;
2189 /* Rescale into the range of one word, extract the bits of that
2190 one word, and shift the result into position. */
2191 a *= Wtype_MAXp1_F;
2192 counter = a;
2193 return (DWtype)counter << shift;
/* Out of range: value saturates to all-ones.  */
2195 return -1;
2196 #else
2197 # error
2198 #endif
2200 #endif
2202 #if defined(L_fixsfdi) && LIBGCC2_HAS_SF_MODE
2203 DWtype
2204 __fixsfdi (SFtype a)
2206 if (a < 0)
2207 return - __fixunssfDI (-a);
2208 return __fixunssfDI (a);
2210 #endif
2212 #if defined(L_floatdixf) && LIBGCC2_HAS_XF_MODE
2213 XFtype
2214 __floatdixf (DWtype u)
2216 #if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
2217 # error
2218 #endif
2219 XFtype d = (Wtype) (u >> W_TYPE_SIZE);
2220 d *= Wtype_MAXp1_F;
2221 d += (UWtype)u;
2222 return d;
2224 #endif
2226 #if defined(L_floatundixf) && LIBGCC2_HAS_XF_MODE
2227 XFtype
2228 __floatundixf (UDWtype u)
2230 #if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
2231 # error
2232 #endif
2233 XFtype d = (UWtype) (u >> W_TYPE_SIZE);
2234 d *= Wtype_MAXp1_F;
2235 d += (UWtype)u;
2236 return d;
2238 #endif
2240 #if defined(L_floatditf) && LIBGCC2_HAS_TF_MODE
2241 TFtype
2242 __floatditf (DWtype u)
2244 #if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
2245 # error
2246 #endif
2247 TFtype d = (Wtype) (u >> W_TYPE_SIZE);
2248 d *= Wtype_MAXp1_F;
2249 d += (UWtype)u;
2250 return d;
2252 #endif
2254 #if defined(L_floatunditf) && LIBGCC2_HAS_TF_MODE
2255 TFtype
2256 __floatunditf (UDWtype u)
2258 #if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
2259 # error
2260 #endif
2261 TFtype d = (UWtype) (u >> W_TYPE_SIZE);
2262 d *= Wtype_MAXp1_F;
2263 d += (UWtype)u;
2264 return d;
2266 #endif
2268 #if (defined(L_floatdisf) && LIBGCC2_HAS_SF_MODE) \
2269 || (defined(L_floatdidf) && LIBGCC2_HAS_DF_MODE)
2270 #define DI_SIZE (W_TYPE_SIZE * 2)
2271 #define F_MODE_OK(SIZE) \
2272 (SIZE < DI_SIZE \
2273 && SIZE > (DI_SIZE - SIZE + FSSIZE) \
2274 && !AVOID_FP_TYPE_CONVERSION(SIZE))
2275 #if defined(L_floatdisf)
2276 #define FUNC __floatdisf
2277 #define FSTYPE SFtype
2278 #define FSSIZE __LIBGCC_SF_MANT_DIG__
2279 #else
2280 #define FUNC __floatdidf
2281 #define FSTYPE DFtype
2282 #define FSSIZE __LIBGCC_DF_MANT_DIG__
2283 #endif
/* Convert the signed double-word U to FSTYPE (SFtype for __floatdisf,
   DFtype for __floatdidf).  Three strategies, selected at preprocessor
   time: direct exact conversion; widen to a suitable FTYPE after
   squashing low-order bits that could double-round; or manual
   extraction of one word of mantissa when no wider type exists.  */
2285 FSTYPE
2286 FUNC (DWtype u)
2288 #if FSSIZE >= W_TYPE_SIZE
2289 /* When the word size is small, we never get any rounding error. */
2290 FSTYPE f = (Wtype) (u >> W_TYPE_SIZE);
2291 f *= Wtype_MAXp1_F;
2292 f += (UWtype)u;
2293 return f;
2294 #elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
2295 || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
2296 || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2298 #if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
2299 # define FSIZE __LIBGCC_DF_MANT_DIG__
2300 # define FTYPE DFtype
2301 #elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
2302 # define FSIZE __LIBGCC_XF_MANT_DIG__
2303 # define FTYPE XFtype
2304 #elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2305 # define FSIZE __LIBGCC_TF_MANT_DIG__
2306 # define FTYPE TFtype
2307 #else
2308 # error
2309 #endif
2311 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
2313 /* Protect against double-rounding error.
2314 Represent any low-order bits, that might be truncated by a bit that
2315 won't be lost. The bit can go in anywhere below the rounding position
2316 of the FSTYPE. A fixed mask and bit position handles all usual
2317 configurations. */
2318 if (! (- ((DWtype) 1 << FSIZE) < u
2319 && u < ((DWtype) 1 << FSIZE)))
2321 if ((UDWtype) u & (REP_BIT - 1))
2323 u &= ~ (REP_BIT - 1);
2324 u |= REP_BIT;
2328 /* Do the calculation in a wider type so that we don't lose any of
2329 the precision of the high word while multiplying it. */
2330 FTYPE f = (Wtype) (u >> W_TYPE_SIZE);
2331 f *= Wtype_MAXp1_F;
2332 f += (UWtype)u;
2333 return (FSTYPE) f;
2334 #else
2335 #if FSSIZE >= W_TYPE_SIZE - 2
2336 # error
2337 #endif
2338 /* Finally, the word size is larger than the number of bits in the
2339 required FSTYPE, and we've got no suitable wider type. The only
2340 way to avoid double rounding is to special case the
2341 extraction. */
2343 /* If there are no high bits set, fall back to one conversion. */
2344 if ((Wtype)u == u)
2345 return (FSTYPE)(Wtype)u;
2347 /* Otherwise, find the power of two. */
2348 Wtype hi = u >> W_TYPE_SIZE;
2349 if (hi < 0)
2350 hi = -(UWtype) hi;
2352 UWtype count, shift;
2353 #if !defined (COUNT_LEADING_ZEROS_0) || COUNT_LEADING_ZEROS_0 != W_TYPE_SIZE
2354 if (hi == 0)
2355 count = W_TYPE_SIZE;
2356 else
2357 #endif
2358 count_leading_zeros (count, hi);
2360 /* No leading bits means u == minimum. */
2361 if (count == 0)
2362 return Wtype_MAXp1_F * (FSTYPE) (hi | ((UWtype) u != 0));
2364 shift = 1 + W_TYPE_SIZE - count;
2366 /* Shift down the most significant bits. */
2367 hi = u >> shift;
2369 /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
2370 if ((UWtype)u << (W_TYPE_SIZE - shift))
2371 hi |= 1;
2373 /* Convert the one word of data, and rescale. */
2374 FSTYPE f = hi, e;
2375 if (shift == W_TYPE_SIZE)
2376 e = Wtype_MAXp1_F;
2377 /* The following two cases could be merged if we knew that the target
2378 supported a native unsigned->float conversion. More often, we only
2379 have a signed conversion, and have to add extra fixup code. */
2380 else if (shift == W_TYPE_SIZE - 1)
2381 e = Wtype_MAXp1_F / 2;
2382 else
2383 e = (Wtype)1 << shift;
2384 return f * e;
2385 #endif
2387 #endif
2389 #if (defined(L_floatundisf) && LIBGCC2_HAS_SF_MODE) \
2390 || (defined(L_floatundidf) && LIBGCC2_HAS_DF_MODE)
2391 #define DI_SIZE (W_TYPE_SIZE * 2)
2392 #define F_MODE_OK(SIZE) \
2393 (SIZE < DI_SIZE \
2394 && SIZE > (DI_SIZE - SIZE + FSSIZE) \
2395 && !AVOID_FP_TYPE_CONVERSION(SIZE))
2396 #if defined(L_floatundisf)
2397 #define FUNC __floatundisf
2398 #define FSTYPE SFtype
2399 #define FSSIZE __LIBGCC_SF_MANT_DIG__
2400 #else
2401 #define FUNC __floatundidf
2402 #define FSTYPE DFtype
2403 #define FSSIZE __LIBGCC_DF_MANT_DIG__
2404 #endif
/* Convert the unsigned double-word U to FSTYPE (SFtype for
   __floatundisf, DFtype for __floatundidf).  Same three strategies as
   the signed variant, simplified by the absence of a sign.  */
2406 FSTYPE
2407 FUNC (UDWtype u)
2409 #if FSSIZE >= W_TYPE_SIZE
2410 /* When the word size is small, we never get any rounding error. */
2411 FSTYPE f = (UWtype) (u >> W_TYPE_SIZE);
2412 f *= Wtype_MAXp1_F;
2413 f += (UWtype)u;
2414 return f;
2415 #elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
2416 || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
2417 || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2419 #if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
2420 # define FSIZE __LIBGCC_DF_MANT_DIG__
2421 # define FTYPE DFtype
2422 #elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
2423 # define FSIZE __LIBGCC_XF_MANT_DIG__
2424 # define FTYPE XFtype
2425 #elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2426 # define FSIZE __LIBGCC_TF_MANT_DIG__
2427 # define FTYPE TFtype
2428 #else
2429 # error
2430 #endif
2432 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
2434 /* Protect against double-rounding error.
2435 Represent any low-order bits, that might be truncated by a bit that
2436 won't be lost. The bit can go in anywhere below the rounding position
2437 of the FSTYPE. A fixed mask and bit position handles all usual
2438 configurations. */
2439 if (u >= ((UDWtype) 1 << FSIZE))
2441 if ((UDWtype) u & (REP_BIT - 1))
2443 u &= ~ (REP_BIT - 1);
2444 u |= REP_BIT;
2448 /* Do the calculation in a wider type so that we don't lose any of
2449 the precision of the high word while multiplying it. */
2450 FTYPE f = (UWtype) (u >> W_TYPE_SIZE);
2451 f *= Wtype_MAXp1_F;
2452 f += (UWtype)u;
2453 return (FSTYPE) f;
2454 #else
2455 #if FSSIZE == W_TYPE_SIZE - 1
2456 # error
2457 #endif
2458 /* Finally, the word size is larger than the number of bits in the
2459 required FSTYPE, and we've got no suitable wider type. The only
2460 way to avoid double rounding is to special case the
2461 extraction. */
2463 /* If there are no high bits set, fall back to one conversion. */
2464 if ((UWtype)u == u)
2465 return (FSTYPE)(UWtype)u;
2467 /* Otherwise, find the power of two. */
2468 UWtype hi = u >> W_TYPE_SIZE;
2470 UWtype count, shift;
2471 count_leading_zeros (count, hi);
2473 shift = W_TYPE_SIZE - count;
2475 /* Shift down the most significant bits. */
2476 hi = u >> shift;
2478 /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
2479 if ((UWtype)u << (W_TYPE_SIZE - shift))
2480 hi |= 1;
2482 /* Convert the one word of data, and rescale. */
2483 FSTYPE f = hi, e;
2484 if (shift == W_TYPE_SIZE)
2485 e = Wtype_MAXp1_F;
2486 /* The following two cases could be merged if we knew that the target
2487 supported a native unsigned->float conversion. More often, we only
2488 have a signed conversion, and have to add extra fixup code. */
2489 else if (shift == W_TYPE_SIZE - 1)
2490 e = Wtype_MAXp1_F / 2;
2491 else
2492 e = (Wtype)1 << shift;
2493 return f * e;
2494 #endif
2496 #endif
2498 #if defined(L_fixunsxfsi) && LIBGCC2_HAS_XF_MODE
/* Convert the XFtype value A to an unsigned single word, biasing
   values that overflow the signed conversion into signed range first.
   NOTE(review): the comparison casts Wtype_MIN to DFtype rather than
   XFtype; harmless in value since Wtype_MIN is a power of two exactly
   representable in DFtype, but confirm the narrower cast is intended.  */
2499 UWtype
2500 __fixunsxfSI (XFtype a)
2502 if (a >= - (DFtype) Wtype_MIN)
/* A exceeds Wtype range: bias by Wtype_MIN, convert as signed,
   then undo the bias in integer arithmetic.  */
2503 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
2504 return (Wtype) a;
2506 #endif
2508 #if defined(L_fixunsdfsi) && LIBGCC2_HAS_DF_MODE
2509 UWtype
2510 __fixunsdfSI (DFtype a)
2512 if (a >= - (DFtype) Wtype_MIN)
2513 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
2514 return (Wtype) a;
2516 #endif
2518 #if defined(L_fixunssfsi) && LIBGCC2_HAS_SF_MODE
2519 UWtype
2520 __fixunssfSI (SFtype a)
2522 if (a >= - (SFtype) Wtype_MIN)
2523 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
2524 return (Wtype) a;
2526 #endif
2528 /* Integer power helper used from __builtin_powi for non-constant
2529 exponents. */
2531 #if (defined(L_powisf2) && LIBGCC2_HAS_SF_MODE) \
2532 || (defined(L_powidf2) && LIBGCC2_HAS_DF_MODE) \
2533 || (defined(L_powixf2) && LIBGCC2_HAS_XF_MODE) \
2534 || (defined(L_powitf2) && LIBGCC2_HAS_TF_MODE)
2535 # if defined(L_powisf2)
2536 # define TYPE SFtype
2537 # define NAME __powisf2
2538 # elif defined(L_powidf2)
2539 # define TYPE DFtype
2540 # define NAME __powidf2
2541 # elif defined(L_powixf2)
2542 # define TYPE XFtype
2543 # define NAME __powixf2
2544 # elif defined(L_powitf2)
2545 # define TYPE TFtype
2546 # define NAME __powitf2
2547 # endif
2549 #undef int
2550 #undef unsigned
2551 TYPE
2552 NAME (TYPE x, int m)
2554 unsigned int n = m < 0 ? -(unsigned int) m : (unsigned int) m;
2555 TYPE y = n % 2 ? x : 1;
2556 while (n >>= 1)
2558 x = x * x;
2559 if (n % 2)
2560 y = y * x;
2562 return m < 0 ? 1/y : y;
2565 #endif
2567 #if((defined(L_mulhc3) || defined(L_divhc3)) && LIBGCC2_HAS_HF_MODE) \
2568 || ((defined(L_mulsc3) || defined(L_divsc3)) && LIBGCC2_HAS_SF_MODE) \
2569 || ((defined(L_muldc3) || defined(L_divdc3)) && LIBGCC2_HAS_DF_MODE) \
2570 || ((defined(L_mulxc3) || defined(L_divxc3)) && LIBGCC2_HAS_XF_MODE) \
2571 || ((defined(L_multc3) || defined(L_divtc3)) && LIBGCC2_HAS_TF_MODE)
2573 #undef float
2574 #undef double
2575 #undef long
2577 #if defined(L_mulhc3) || defined(L_divhc3)
2578 # define MTYPE HFtype
2579 # define CTYPE HCtype
2580 # define AMTYPE SFtype
2581 # define MODE hc
2582 # define CEXT __LIBGCC_HF_FUNC_EXT__
2583 # define NOTRUNC (!__LIBGCC_HF_EXCESS_PRECISION__)
2584 #elif defined(L_mulsc3) || defined(L_divsc3)
2585 # define MTYPE SFtype
2586 # define CTYPE SCtype
2587 # define AMTYPE DFtype
2588 # define MODE sc
2589 # define CEXT __LIBGCC_SF_FUNC_EXT__
2590 # define NOTRUNC (!__LIBGCC_SF_EXCESS_PRECISION__)
2591 # define RBIG (__LIBGCC_SF_MAX__ / 2)
2592 # define RMIN (__LIBGCC_SF_MIN__)
2593 # define RMIN2 (__LIBGCC_SF_EPSILON__)
2594 # define RMINSCAL (1 / __LIBGCC_SF_EPSILON__)
2595 # define RMAX2 (RBIG * RMIN2)
2596 #elif defined(L_muldc3) || defined(L_divdc3)
2597 # define MTYPE DFtype
2598 # define CTYPE DCtype
2599 # define MODE dc
2600 # define CEXT __LIBGCC_DF_FUNC_EXT__
2601 # define NOTRUNC (!__LIBGCC_DF_EXCESS_PRECISION__)
2602 # define RBIG (__LIBGCC_DF_MAX__ / 2)
2603 # define RMIN (__LIBGCC_DF_MIN__)
2604 # define RMIN2 (__LIBGCC_DF_EPSILON__)
2605 # define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
2606 # define RMAX2 (RBIG * RMIN2)
2607 #elif defined(L_mulxc3) || defined(L_divxc3)
2608 # define MTYPE XFtype
2609 # define CTYPE XCtype
2610 # define MODE xc
2611 # define CEXT __LIBGCC_XF_FUNC_EXT__
2612 # define NOTRUNC (!__LIBGCC_XF_EXCESS_PRECISION__)
2613 # define RBIG (__LIBGCC_XF_MAX__ / 2)
2614 # define RMIN (__LIBGCC_XF_MIN__)
2615 # define RMIN2 (__LIBGCC_XF_EPSILON__)
2616 # define RMINSCAL (1 / __LIBGCC_XF_EPSILON__)
2617 # define RMAX2 (RBIG * RMIN2)
2618 #elif defined(L_multc3) || defined(L_divtc3)
2619 # define MTYPE TFtype
2620 # define CTYPE TCtype
2621 # define MODE tc
2622 # define CEXT __LIBGCC_TF_FUNC_EXT__
2623 # define NOTRUNC (!__LIBGCC_TF_EXCESS_PRECISION__)
2624 # if __LIBGCC_TF_MANT_DIG__ == 106
2625 # define RBIG (__LIBGCC_DF_MAX__ / 2)
2626 # define RMIN (__LIBGCC_DF_MIN__)
2627 # define RMIN2 (__LIBGCC_DF_EPSILON__)
2628 # define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
2629 # else
2630 # define RBIG (__LIBGCC_TF_MAX__ / 2)
2631 # define RMIN (__LIBGCC_TF_MIN__)
2632 # define RMIN2 (__LIBGCC_TF_EPSILON__)
2633 # define RMINSCAL (1 / __LIBGCC_TF_EPSILON__)
2634 # endif
2635 # define RMAX2 (RBIG * RMIN2)
2636 #else
2637 # error
2638 #endif
2640 #define CONCAT3(A,B,C) _CONCAT3(A,B,C)
2641 #define _CONCAT3(A,B,C) A##B##C
2643 #define CONCAT2(A,B) _CONCAT2(A,B)
2644 #define _CONCAT2(A,B) A##B
2646 #define isnan(x) __builtin_isnan (x)
2647 #define isfinite(x) __builtin_isfinite (x)
2648 #define isinf(x) __builtin_isinf (x)
2650 #define INFINITY CONCAT2(__builtin_huge_val, CEXT) ()
2651 #define I 1i
2653 /* Helpers to make the following code slightly less gross. */
2654 #define COPYSIGN CONCAT2(__builtin_copysign, CEXT)
2655 #define FABS CONCAT2(__builtin_fabs, CEXT)
2657 /* Verify that MTYPE matches up with CEXT. */
2658 extern void *compile_type_assert[sizeof(INFINITY) == sizeof(MTYPE) ? 1 : -1];
2660 /* Ensure that we've lost any extra precision. */
2661 #if NOTRUNC
2662 # define TRUNC(x)
2663 #else
2664 # define TRUNC(x) __asm__ ("" : "=m"(x) : "m"(x))
2665 #endif
2667 #if defined(L_mulhc3) || defined(L_mulsc3) || defined(L_muldc3) \
2668 || defined(L_mulxc3) || defined(L_multc3)
/* Complex multiply (a + ib) * (c + id) following C Annex G: the naive
   product is computed first, and if it comes out NaN + iNaN, infinities
   that the arithmetic destroyed are recovered by "boxing" them and
   recomputing scaled by INFINITY.  */
2670 CTYPE
2671 CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
2673 MTYPE ac, bd, ad, bc, x, y;
2674 CTYPE res;
2676 ac = a * c;
2677 bd = b * d;
2678 ad = a * d;
2679 bc = b * c;
/* TRUNC forces each partial product to storage precision so excess
   hardware precision can't change the NaN/infinity analysis below.  */
2681 TRUNC (ac);
2682 TRUNC (bd);
2683 TRUNC (ad);
2684 TRUNC (bc);
2686 x = ac - bd;
2687 y = ad + bc;
2689 if (isnan (x) && isnan (y))
2691 /* Recover infinities that computed as NaN + iNaN. */
2692 _Bool recalc = 0;
2693 if (isinf (a) || isinf (b))
2695 /* z is infinite. "Box" the infinity and change NaNs in
2696 the other factor to 0. */
2697 a = COPYSIGN (isinf (a) ? 1 : 0, a);
2698 b = COPYSIGN (isinf (b) ? 1 : 0, b);
2699 if (isnan (c)) c = COPYSIGN (0, c);
2700 if (isnan (d)) d = COPYSIGN (0, d);
2701 recalc = 1;
2703 if (isinf (c) || isinf (d))
2705 /* w is infinite. "Box" the infinity and change NaNs in
2706 the other factor to 0. */
2707 c = COPYSIGN (isinf (c) ? 1 : 0, c);
2708 d = COPYSIGN (isinf (d) ? 1 : 0, d);
2709 if (isnan (a)) a = COPYSIGN (0, a);
2710 if (isnan (b)) b = COPYSIGN (0, b);
2711 recalc = 1;
2713 if (!recalc
2714 && (isinf (ac) || isinf (bd)
2715 || isinf (ad) || isinf (bc)))
2717 /* Recover infinities from overflow by changing NaNs to 0. */
2718 if (isnan (a)) a = COPYSIGN (0, a);
2719 if (isnan (b)) b = COPYSIGN (0, b);
2720 if (isnan (c)) c = COPYSIGN (0, c);
2721 if (isnan (d)) d = COPYSIGN (0, d);
2722 recalc = 1;
2724 if (recalc)
2726 x = INFINITY * (a * c - b * d);
2727 y = INFINITY * (a * d + b * c);
2731 __real__ res = x;
2732 __imag__ res = y;
2733 return res;
2735 #endif /* complex multiply */
#if defined(L_divhc3) || defined(L_divsc3) || defined(L_divdc3) \
    || defined(L_divxc3) || defined(L_divtc3)

/* Return the complex quotient (a + ib) / (c + id).  Two strategies are
   used: a simple widened-precision divide when extra hardware precision
   is available, otherwise a scaled variant of Smith's method, followed
   by NaN + iNaN recovery of infinities and zeros.  */
CTYPE
CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
{
#if defined(L_divhc3) \
  || (defined(L_divsc3) && defined(__LIBGCC_HAVE_HWDBL__) )

  /* Half precision is handled with float precision.
     float is handled with double precision when double precision
     hardware is available.
     Due to the additional precision, the simple complex divide
     method (without Smith's method) is sufficient to get accurate
     answers and runs slightly faster than Smith's method.  */

  AMTYPE aa, bb, cc, dd;
  AMTYPE denom;
  MTYPE x, y;
  CTYPE res;
  aa = a;
  bb = b;
  cc = c;
  dd = d;

  denom = (cc * cc) + (dd * dd);
  x = ((aa * cc) + (bb * dd)) / denom;
  y = ((bb * cc) - (aa * dd)) / denom;

#else
  MTYPE denom, ratio, x, y;
  CTYPE res;

  /* double, extended, long double have significant potential
     underflow/overflow errors that can be greatly reduced with
     a limited number of tests and adjustments.  float is handled
     the same way when no HW double is available.  */

  /* Scale by max(c,d) to reduce chances of denominator overflowing.  */
  if (FABS (c) < FABS (d))
    {
      /* Prevent underflow when denominator is near max representable.  */
      if (FABS (d) >= RBIG)
	{
	  a = a / 2;
	  b = b / 2;
	  c = c / 2;
	  d = d / 2;
	}
      /* Avoid overflow/underflow issues when c and d are small.
	 Scaling up helps avoid some underflows.
	 No new overflow possible since c&d < RMIN2.  */
      if (FABS (d) < RMIN2)
	{
	  a = a * RMINSCAL;
	  b = b * RMINSCAL;
	  c = c * RMINSCAL;
	  d = d * RMINSCAL;
	}
      else
	{
	  if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (d) < RMAX2))
	      || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
		  && (FABS (d) < RMAX2)))
	    {
	      a = a * RMINSCAL;
	      b = b * RMINSCAL;
	      c = c * RMINSCAL;
	      d = d * RMINSCAL;
	    }
	}
      ratio = c / d;
      denom = (c * ratio) + d;
      /* Choose alternate order of computation if ratio is subnormal.  */
      if (FABS (ratio) > RMIN)
	{
	  x = ((a * ratio) + b) / denom;
	  y = ((b * ratio) - a) / denom;
	}
      else
	{
	  x = ((c * (a / d)) + b) / denom;
	  y = ((c * (b / d)) - a) / denom;
	}
    }
  else
    {
      /* Prevent underflow when denominator is near max representable.  */
      if (FABS (c) >= RBIG)
	{
	  a = a / 2;
	  b = b / 2;
	  c = c / 2;
	  d = d / 2;
	}
      /* Avoid overflow/underflow issues when both c and d are small.
	 Scaling up helps avoid some underflows.
	 No new overflow possible since both c&d are less than RMIN2.  */
      if (FABS (c) < RMIN2)
	{
	  a = a * RMINSCAL;
	  b = b * RMINSCAL;
	  c = c * RMINSCAL;
	  d = d * RMINSCAL;
	}
      else
	{
	  if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (c) < RMAX2))
	      || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
		  && (FABS (c) < RMAX2)))
	    {
	      a = a * RMINSCAL;
	      b = b * RMINSCAL;
	      c = c * RMINSCAL;
	      d = d * RMINSCAL;
	    }
	}
      ratio = d / c;
      denom = (d * ratio) + c;
      /* Choose alternate order of computation if ratio is subnormal.  */
      if (FABS (ratio) > RMIN)
	{
	  x = ((b * ratio) + a) / denom;
	  y = (b - (a * ratio)) / denom;
	}
      else
	{
	  x = (a + (d * (b / c))) / denom;
	  y = (b - (d * (a / c))) / denom;
	}
    }
#endif

  /* Recover infinities and zeros that computed as NaN+iNaN; the only
     cases are nonzero/zero, infinite/finite, and finite/infinite.  */
  if (isnan (x) && isnan (y))
    {
      if (c == 0.0 && d == 0.0 && (!isnan (a) || !isnan (b)))
	{
	  x = COPYSIGN (INFINITY, c) * a;
	  y = COPYSIGN (INFINITY, c) * b;
	}
      else if ((isinf (a) || isinf (b)) && isfinite (c) && isfinite (d))
	{
	  a = COPYSIGN (isinf (a) ? 1 : 0, a);
	  b = COPYSIGN (isinf (b) ? 1 : 0, b);
	  x = INFINITY * (a * c + b * d);
	  y = INFINITY * (b * c - a * d);
	}
      else if ((isinf (c) || isinf (d)) && isfinite (a) && isfinite (b))
	{
	  c = COPYSIGN (isinf (c) ? 1 : 0, c);
	  d = COPYSIGN (isinf (d) ? 1 : 0, d);
	  x = 0.0 * (a * c + b * d);
	  y = 0.0 * (b * c - a * d);
	}
    }

  __real__ res = x;
  __imag__ res = y;
  return res;
}
#endif /* complex divide */
2902 #endif /* all complex float routines */
/* From here on down, the routines use normal data types.  */

/* Poison the mode-based type names: redefining them to an undeclared
   identifier makes any accidental later use a compile-time error.  */
#define SItype bogus_type
#define USItype bogus_type
#define DItype bogus_type
#define UDItype bogus_type
#define SFtype bogus_type
#define DFtype bogus_type
#undef Wtype
#undef UWtype
#undef HWtype
#undef UHWtype
#undef DWtype
#undef UDWtype

/* Remove any macro definitions of the basic keywords so the remaining
   code sees the ordinary C keywords again (#undef of a name that is
   not a macro is a harmless no-op).  */
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
2927 #ifdef L__gcc_bcmp
/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.
   SIZE of zero compares equal; the bytes are compared as
   unsigned char, so the result is the difference of the first
   mismatching pair.  */

int
__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
{
  while (size > 0)
    {
      const unsigned char c1 = *s1++, c2 = *s2++;
      if (c1 != c2)
	return c1 - c2;
      size--;
    }
  return 0;
}
2946 #endif
/* __eprintf used to be used by GCC's private version of <assert.h>.
   We no longer provide that header, but this routine remains in libgcc.a
   for binary backward compatibility.  Note that it is not included in
   the shared version of libgcc.  */
#ifdef L_eprintf
#ifndef inhibit_libc

#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */
#include <stdio.h>

/* Report a failed assertion and abort.  STRING is a printf format
   that consumes EXPRESSION, LINE and FILENAME in that order; it is
   assumed to be a trusted literal emitted by old compiled code, never
   user input.  This function does not return.  */
void
__eprintf (const char *string, const char *expression,
	   unsigned int line, const char *filename)
{
  fprintf (stderr, string, expression, line, filename);
  fflush (stderr);
  abort ();
}

#endif
#endif
2971 #ifdef L_clear_cache
/* Clear part of an instruction cache for the half-open range
   [BEG, END).  On targets that do not define CLEAR_INSN_CACHE this is
   deliberately a no-op (coherent caches, or nothing to do).  */

void
__clear_cache (void *beg __attribute__((__unused__)),
	       void *end __attribute__((__unused__)))
{
#ifdef CLEAR_INSN_CACHE
  /* Cast the void* pointers to char* as some implementations
     of the macro assume the pointers can be subtracted from
     one another.  */
  CLEAR_INSN_CACHE ((char *) beg, (char *) end);
#endif /* CLEAR_INSN_CACHE */
}
2986 #endif /* L_clear_cache */
#ifdef L_trampoline

/* Jump to a trampoline, loading the static chain address.  */

#if defined(WINNT) && ! defined(__CYGWIN__)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
int getpagesize (void);
int mprotect (char *,int, int);

/* Minimal POSIX getpagesize replacement for native Windows.  */
int
getpagesize (void)
{
#ifdef _ALPHA_
  return 8192;
#else
  return 4096;
#endif
}

/* Minimal POSIX mprotect replacement for native Windows, built on
   VirtualProtect.  PROT is the POSIX bitmask (READ=1, WRITE=2,
   EXEC=4); only the combinations below are supported.  Returns 0 on
   success, -1 on failure or unsupported PROT.  */
int
mprotect (char *addr, int len, int prot)
{
  DWORD np, op;

  if (prot == 7)
    np = 0x40;			/* rwx -> PAGE_EXECUTE_READWRITE.  */
  else if (prot == 5)
    np = 0x20;			/* r-x -> PAGE_EXECUTE_READ.  */
  else if (prot == 4)
    np = 0x10;			/* --x -> PAGE_EXECUTE.  */
  else if (prot == 3)
    np = 0x04;			/* rw- -> PAGE_READWRITE.  */
  else if (prot == 1)
    np = 0x02;			/* r-- -> PAGE_READONLY.  */
  else if (prot == 0)
    np = 0x01;			/* --- -> PAGE_NOACCESS.  */
  else
    return -1;

  if (VirtualProtect (addr, len, np, &op))
    return 0;
  else
    return -1;
}

#endif /* WINNT && ! __CYGWIN__ */

#ifdef TRANSFER_FROM_TRAMPOLINE
TRANSFER_FROM_TRAMPOLINE
#endif
#endif /* L_trampoline */
#ifndef __CYGWIN__
#ifdef L__main

#include "gbl-ctors.h"

/* Some systems use __main in a way incompatible with its use in gcc, in these
   cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
   give the same symbol without quotes for an alternative entry point.  You
   must define both, or neither.  */
#ifndef NAME__MAIN
#define NAME__MAIN "__main"
#define SYMBOL__MAIN __main
#endif

#if defined (__LIBGCC_INIT_SECTION_ASM_OP__) \
    || defined (__LIBGCC_INIT_ARRAY_SECTION_ASM_OP__)
#undef HAS_INIT_SECTION
#define HAS_INIT_SECTION
#endif

#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)

/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
   code to run constructors.  In that case, we need to handle EH here, too.
   But MINGW32 is special because it handles CRTSTUFF and EH on its own.  */

#ifdef __MINGW32__
#undef __LIBGCC_EH_FRAME_SECTION_NAME__
#endif

#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
#include "unwind-dw2-fde.h"
extern unsigned char __EH_FRAME_BEGIN__[];
#endif

/* Run all the global destructors on exit from the program.  */

void
__do_global_dtors (void)
{
#ifdef DO_GLOBAL_DTORS_BODY
  DO_GLOBAL_DTORS_BODY;
#else
  /* __DTOR_LIST__[0] is a count/sentinel slot; walk to the end of the
     NULL-terminated list, then invoke the destructors in reverse
     (last-registered-first) order.  */
  static func_ptr *p = __DTOR_LIST__ + 1;
  while (*p)
    {
      p++;
      (*(p-1)) ();
    }
#endif
#if defined (__LIBGCC_EH_FRAME_SECTION_NAME__) && !defined (HAS_INIT_SECTION)
  {
    /* Deregister our EH frame info exactly once, even if destructors
       run more than once.  */
    static int completed = 0;
    if (! completed)
      {
	completed = 1;
	__deregister_frame_info (__EH_FRAME_BEGIN__);
      }
  }
#endif
}
#endif

#ifndef HAS_INIT_SECTION
/* Run all the global constructors on entry to the program.  */

void
__do_global_ctors (void)
{
#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
  {
    static struct object object;
    __register_frame_info (__EH_FRAME_BEGIN__, &object);
  }
#endif
  DO_GLOBAL_CTORS_BODY;
  atexit (__do_global_dtors);
}
#endif /* no HAS_INIT_SECTION */

#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
/* Subroutine called automatically by `main'.
   Compiling a global function named `main'
   produces an automatic call to this function at the beginning.

   For many systems, this routine calls __do_global_ctors.
   For systems which support a .init section we use the .init section
   to run __do_global_ctors, so we need not do anything here.  */

extern void SYMBOL__MAIN (void);
void
SYMBOL__MAIN (void)
{
  /* Support recursive calls to `main': run initializers just once.  */
  static int initialized;
  if (! initialized)
    {
      initialized = 1;
      __do_global_ctors ();
    }
}
#endif /* no HAS_INIT_SECTION or INVOKE__main */

#endif /* L__main */
#endif /* __CYGWIN__ */
#ifdef L_ctors

#include "gbl-ctors.h"

/* Provide default definitions for the lists of constructors and
   destructors, so that we don't get linker errors.  These symbols are
   intentionally bss symbols, so that gld and/or collect will provide
   the right values.  */

/* We declare the lists here with two elements each,
   so that they are valid empty lists if no other definition is loaded.

   If we are using the old "set" extensions to have the gnu linker
   collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
   must be in the bss/common section.

   Long term no port should use those extensions.  But many still do.  */
#if !defined(__LIBGCC_INIT_SECTION_ASM_OP__)
#if defined (TARGET_ASM_CONSTRUCTOR) || defined (USE_COLLECT2)
func_ptr __CTOR_LIST__[2] = {0, 0};
func_ptr __DTOR_LIST__[2] = {0, 0};
#else
/* Uninitialized on purpose: these must be common/bss symbols so the
   linker can override them (see comment above).  */
func_ptr __CTOR_LIST__[2];
func_ptr __DTOR_LIST__[2];
#endif
#endif /* no __LIBGCC_INIT_SECTION_ASM_OP__ */
#endif /* L_ctors */
3174 #endif /* LIBGCC2_UNITS_PER_WORD <= MIN_UNITS_PER_WORD */