1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2 Copyright (C) 1991, 92, 94, 95, 96, 1997 Free Software Foundation, Inc.
4 This definition file is free software; you can redistribute it
5 and/or modify it under the terms of the GNU General Public
6 License as published by the Free Software Foundation; either
7 version 2, or (at your option) any later version.
9 This definition file is distributed in the hope that it will be
10 useful, but WITHOUT ANY WARRANTY; without even the implied
11 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 See the GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place - Suite 330,
17 Boston, MA 02111-1307, USA. */
/* Number of bits in the single-word type (SItype/USItype).  */
#define SI_TYPE_SIZE 32

/* Quarter-word width, used by the generic count_leading_zeros.  */
#define __BITS4 (SI_TYPE_SIZE / 4)
/* Base of a half-word, i.e. 2^(SI_TYPE_SIZE/2).  */
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
/* Extract the low and high half-words of T as USItype values.  */
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
28 /* Define auxiliary asm macros.
30 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand)
31 multiplies two USItype integers MULTIPLER and MULTIPLICAND,
and generates a two-part USItype product in HIGH_PROD and LOW_PROD.
35 2) __umulsidi3(a,b) multiplies two USItype integers A and B,
36 and returns a UDItype product. This is just a variant of umul_ppmm.
38 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
39 denominator) divides a two-word unsigned integer, composed by the
40 integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
41 places the quotient in QUOTIENT and the remainder in REMAINDER.
42 HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
43 If, in addition, the most significant bit of DENOMINATOR must be 1,
44 then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
46 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
47 denominator). Like udiv_qrnnd but the numbers are signed. The
48 quotient is rounded towards 0.
50 5) count_leading_zeros(count, x) counts the number of zero-bits from
51 the msb to the first non-zero bit. This is the number of steps X
52 needs to be shifted left to set the msb. Undefined for X == 0.
54 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
55 high_addend_2, low_addend_2) adds two two-word unsigned integers,
56 composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
57 LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is lost.
61 7) sub_ddmmss(high_difference, low_difference, high_minuend,
62 low_minuend, high_subtrahend, low_subtrahend) subtracts two
63 two-word unsigned integers, composed by HIGH_MINUEND_1 and
64 LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2
65 respectively. The result is placed in HIGH_DIFFERENCE and
LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere, and is lost.
If any of these macros are left undefined for a particular CPU, C macros are used.
72 /* The CPUs come in alphabetical order below.
74 Please add support for more CPUs here, or improve the current support
76 (E.g. WE32100, IBM360.) */
78 #if defined (__GNUC__) && !defined (NO_ASM)
80 /* We sometimes need to clobber "cc" with gcc2, but that would not be
81 understood by gcc1. Use cpp to avoid major code duplication. */
/* gcc2 wants the condition-code clobber spelled out; gcc1 does not
   understand it.  Expand to nothing for gcc1, to the clobber for gcc2+.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */
90 #if defined (__a29k__) || defined (_AM29K)
91 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
92 __asm__ ("add %1,%4,%5
94 : "=r" ((USItype) (sh)), \
95 "=&r" ((USItype) (sl)) \
96 : "%r" ((USItype) (ah)), \
97 "rI" ((USItype) (bh)), \
98 "%r" ((USItype) (al)), \
99 "rI" ((USItype) (bl)))
100 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
101 __asm__ ("sub %1,%4,%5
103 : "=r" ((USItype) (sh)), \
104 "=&r" ((USItype) (sl)) \
105 : "r" ((USItype) (ah)), \
106 "rI" ((USItype) (bh)), \
107 "r" ((USItype) (al)), \
108 "rI" ((USItype) (bl)))
109 #define umul_ppmm(xh, xl, m0, m1) \
111 USItype __m0 = (m0), __m1 = (m1); \
112 __asm__ ("multiplu %0,%1,%2" \
113 : "=r" ((USItype) (xl)) \
116 __asm__ ("multmu %0,%1,%2" \
117 : "=r" ((USItype) (xh)) \
121 #define udiv_qrnnd(q, r, n1, n0, d) \
122 __asm__ ("dividu %0,%3,%4" \
123 : "=r" ((USItype) (q)), \
124 "=q" ((USItype) (r)) \
125 : "1" ((USItype) (n1)), \
126 "r" ((USItype) (n0)), \
128 #define count_leading_zeros(count, x) \
129 __asm__ ("clz %0,%1" \
130 : "=r" ((USItype) (count)) \
131 : "r" ((USItype) (x)))
132 #endif /* __a29k__ */
134 #if defined (__arc__)
135 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
136 __asm__ ("add.f %1, %4, %5
138 : "=r" ((USItype) (sh)), \
139 "=&r" ((USItype) (sl)) \
140 : "%r" ((USItype) (ah)), \
141 "rIJ" ((USItype) (bh)), \
142 "%r" ((USItype) (al)), \
143 "rIJ" ((USItype) (bl)))
144 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
145 __asm__ ("sub.f %1, %4, %5
147 : "=r" ((USItype) (sh)), \
148 "=&r" ((USItype) (sl)) \
149 : "r" ((USItype) (ah)), \
150 "rIJ" ((USItype) (bh)), \
151 "r" ((USItype) (al)), \
152 "rIJ" ((USItype) (bl)))
153 /* Call libgcc1 routine. */
154 #define umul_ppmm(w1, w0, u, v) \
157 __w.ll = __umulsidi3 (u, v); \
161 #define __umulsidi3 __umulsidi3
162 UDItype
__umulsidi3 (USItype
, USItype
);
165 #if defined (__arm__)
166 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
167 __asm__ ("adds %1, %4, %5
169 : "=r" ((USItype) (sh)), \
170 "=&r" ((USItype) (sl)) \
171 : "%r" ((USItype) (ah)), \
172 "rI" ((USItype) (bh)), \
173 "%r" ((USItype) (al)), \
174 "rI" ((USItype) (bl)))
175 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
176 __asm__ ("subs %1, %4, %5
178 : "=r" ((USItype) (sh)), \
179 "=&r" ((USItype) (sl)) \
180 : "r" ((USItype) (ah)), \
181 "rI" ((USItype) (bh)), \
182 "r" ((USItype) (al)), \
183 "rI" ((USItype) (bl)))
184 #define umul_ppmm(xh, xl, a, b) \
185 {register USItype __t0, __t1, __t2; \
186 __asm__ ("%@ Inlined umul_ppmm
189 bic %3, %5, %2, lsl #16
190 bic %4, %6, %0, lsl #16
197 adds %1, %1, %3, lsl #16
198 adc %0, %0, %3, lsr #16" \
199 : "=&r" ((USItype) (xh)), \
200 "=r" ((USItype) (xl)), \
201 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
202 : "r" ((USItype) (a)), \
203 "r" ((USItype) (b)));}
205 #define UDIV_TIME 100
208 #if defined (__clipper__)
209 #define umul_ppmm(w1, w0, u, v) \
210 ({union {UDItype __ll; \
211 struct {USItype __l, __h;} __i; \
213 __asm__ ("mulwux %2,%0" \
215 : "%0" ((USItype) (u)), \
216 "r" ((USItype) (v))); \
217 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
218 #define smul_ppmm(w1, w0, u, v) \
219 ({union {DItype __ll; \
220 struct {SItype __l, __h;} __i; \
222 __asm__ ("mulwx %2,%0" \
224 : "%0" ((SItype) (u)), \
225 "r" ((SItype) (v))); \
226 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
227 #define __umulsidi3(u, v) \
229 __asm__ ("mulwux %2,%0" \
231 : "%0" ((USItype) (u)), \
232 "r" ((USItype) (v))); \
234 #endif /* __clipper__ */
236 #if defined (__gmicro__)
237 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
238 __asm__ ("add.w %5,%1
240 : "=g" ((USItype) (sh)), \
241 "=&g" ((USItype) (sl)) \
242 : "%0" ((USItype) (ah)), \
243 "g" ((USItype) (bh)), \
244 "%1" ((USItype) (al)), \
245 "g" ((USItype) (bl)))
246 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
247 __asm__ ("sub.w %5,%1
249 : "=g" ((USItype) (sh)), \
250 "=&g" ((USItype) (sl)) \
251 : "0" ((USItype) (ah)), \
252 "g" ((USItype) (bh)), \
253 "1" ((USItype) (al)), \
254 "g" ((USItype) (bl)))
255 #define umul_ppmm(ph, pl, m0, m1) \
256 __asm__ ("mulx %3,%0,%1" \
257 : "=g" ((USItype) (ph)), \
258 "=r" ((USItype) (pl)) \
259 : "%0" ((USItype) (m0)), \
260 "g" ((USItype) (m1)))
261 #define udiv_qrnnd(q, r, nh, nl, d) \
262 __asm__ ("divx %4,%0,%1" \
263 : "=g" ((USItype) (q)), \
264 "=r" ((USItype) (r)) \
265 : "1" ((USItype) (nh)), \
266 "0" ((USItype) (nl)), \
268 #define count_leading_zeros(count, x) \
269 __asm__ ("bsch/1 %1,%0" \
271 : "g" ((USItype) (x)), \
276 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
277 __asm__ ("add %4,%5,%1
279 : "=r" ((USItype) (sh)), \
280 "=&r" ((USItype) (sl)) \
281 : "%rM" ((USItype) (ah)), \
282 "rM" ((USItype) (bh)), \
283 "%rM" ((USItype) (al)), \
284 "rM" ((USItype) (bl)))
285 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
286 __asm__ ("sub %4,%5,%1
288 : "=r" ((USItype) (sh)), \
289 "=&r" ((USItype) (sl)) \
290 : "rM" ((USItype) (ah)), \
291 "rM" ((USItype) (bh)), \
292 "rM" ((USItype) (al)), \
293 "rM" ((USItype) (bl)))
294 #if defined (_PA_RISC1_1)
295 #define umul_ppmm(w1, w0, u, v) \
300 struct {USItype __w1, __w0;} __w1w0; \
302 __asm__ ("xmpyu %1,%2,%0" \
304 : "x" ((USItype) (u)), \
305 "x" ((USItype) (v))); \
306 (w1) = __t.__w1w0.__w1; \
307 (w0) = __t.__w1w0.__w0; \
314 #define count_leading_zeros(count, x) \
319 extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
320 extru,tr %1,15,16,%1 ; No. Shift down, skip add.
321 ldo 16(%0),%0 ; Yes. Perform add.
322 extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
323 extru,tr %1,23,8,%1 ; No. Shift down, skip add.
324 ldo 8(%0),%0 ; Yes. Perform add.
325 extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
326 extru,tr %1,27,4,%1 ; No. Shift down, skip add.
327 ldo 4(%0),%0 ; Yes. Perform add.
328 extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
329 extru,tr %1,29,2,%1 ; No. Shift down, skip add.
330 ldo 2(%0),%0 ; Yes. Perform add.
331 extru %1,30,1,%1 ; Extract bit 1.
332 sub %0,%1,%0 ; Subtract it.
333 " : "=r" (count), "=r" (__tmp) : "1" (x)); \
#if defined (__i386__) || defined (__i486__)
/* 80x86: add/adc (sub/sbb) chain the carry (borrow) flag across the two
   word operations; mull and divl provide 32x32->64 multiply and 64/32
   divide; bsrl finds the highest set bit for count_leading_zeros.
   "\n\t" separators replace the obsolete multi-line asm string extension
   without changing the emitted instructions.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1\n\tadcl %3,%0"					\
	   : "=r" ((USItype) (sh)),					\
	     "=&r" ((USItype) (sl))					\
	   : "%0" ((USItype) (ah)),					\
	     "g" ((USItype) (bh)),					\
	     "%1" ((USItype) (al)),					\
	     "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1\n\tsbbl %3,%0"					\
	   : "=r" ((USItype) (sh)),					\
	     "=&r" ((USItype) (sl))					\
	   : "0" ((USItype) (ah)),					\
	     "g" ((USItype) (bh)),					\
	     "1" ((USItype) (al)),					\
	     "g" ((USItype) (bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	   : "=a" ((USItype) (w0)),					\
	     "=d" ((USItype) (w1))					\
	   : "%0" ((USItype) (u)),					\
	     "rm" ((USItype) (v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4"							\
	   : "=a" ((USItype) (q)),					\
	     "=d" ((USItype) (r))					\
	   : "0" ((USItype) (n0)),					\
	     "1" ((USItype) (n1)),					\
	     "rm" ((USItype) (d)))
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("bsrl %1,%0"						\
	     : "=r" (__cbtmp) : "rm" ((USItype) (x)));			\
    (count) = __cbtmp ^ 31;						\
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */
380 #if defined (__i860__)
382 /* Make sure these patterns really improve the code before
383 switching them on. */
384 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
389 struct {USItype __l, __h;} __i; \
391 __a.__i.__l = (al); \
392 __a.__i.__h = (ah); \
393 __b.__i.__l = (bl); \
394 __b.__i.__h = (bh); \
395 __asm__ ("fiadd.dd %1,%2,%0" \
397 : "%f" (__a.__ll), "f" (__b.__ll)); \
398 (sh) = __s.__i.__h; \
399 (sl) = __s.__i.__l; \
401 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
406 struct {USItype __l, __h;} __i; \
408 __a.__i.__l = (al); \
409 __a.__i.__h = (ah); \
410 __b.__i.__l = (bl); \
411 __b.__i.__h = (bh); \
412 __asm__ ("fisub.dd %1,%2,%0" \
414 : "%f" (__a.__ll), "f" (__b.__ll)); \
415 (sh) = __s.__i.__h; \
416 (sl) = __s.__i.__l; \
419 #endif /* __i860__ */
421 #if defined (__i960__)
422 #define umul_ppmm(w1, w0, u, v) \
423 ({union {UDItype __ll; \
424 struct {USItype __l, __h;} __i; \
426 __asm__ ("emul %2,%1,%0" \
428 : "%dI" ((USItype) (u)), \
429 "dI" ((USItype) (v))); \
430 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
431 #define __umulsidi3(u, v) \
433 __asm__ ("emul %2,%1,%0" \
435 : "%dI" ((USItype) (u)), \
436 "dI" ((USItype) (v))); \
438 #endif /* __i960__ */
440 #if defined (__M32R__)
441 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
442 /* The cmp clears the condition bit. */ \
446 : "=r" ((USItype) (sh)), \
447 "=&r" ((USItype) (sl)) \
448 : "%0" ((USItype) (ah)), \
449 "r" ((USItype) (bh)), \
450 "%1" ((USItype) (al)), \
451 "r" ((USItype) (bl)) \
453 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
454 /* The cmp clears the condition bit. */ \
458 : "=r" ((USItype) (sh)), \
459 "=&r" ((USItype) (sl)) \
460 : "0" ((USItype) (ah)), \
461 "r" ((USItype) (bh)), \
462 "1" ((USItype) (al)), \
463 "r" ((USItype) (bl)) \
465 #endif /* __M32R__ */
467 #if defined (__mc68000__)
468 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
469 __asm__ ("add%.l %5,%1
471 : "=d" ((USItype) (sh)), \
472 "=&d" ((USItype) (sl)) \
473 : "%0" ((USItype) (ah)), \
474 "d" ((USItype) (bh)), \
475 "%1" ((USItype) (al)), \
476 "g" ((USItype) (bl)))
477 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
478 __asm__ ("sub%.l %5,%1
480 : "=d" ((USItype) (sh)), \
481 "=&d" ((USItype) (sl)) \
482 : "0" ((USItype) (ah)), \
483 "d" ((USItype) (bh)), \
484 "1" ((USItype) (al)), \
485 "g" ((USItype) (bl)))
487 /* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
488 #if defined (__mc68020__) || defined(mc68020) \
489 || defined(__mc68030__) || defined(mc68030) \
490 || defined(__mc68040__) || defined(mc68040) \
491 || defined(__mcpu32__) || defined(mcpu32) \
493 #define umul_ppmm(w1, w0, u, v) \
494 __asm__ ("mulu%.l %3,%1:%0" \
495 : "=d" ((USItype) (w0)), \
496 "=d" ((USItype) (w1)) \
497 : "%0" ((USItype) (u)), \
498 "dmi" ((USItype) (v)))
500 #define udiv_qrnnd(q, r, n1, n0, d) \
501 __asm__ ("divu%.l %4,%1:%0" \
502 : "=d" ((USItype) (q)), \
503 "=d" ((USItype) (r)) \
504 : "0" ((USItype) (n0)), \
505 "1" ((USItype) (n1)), \
506 "dmi" ((USItype) (d)))
508 #define sdiv_qrnnd(q, r, n1, n0, d) \
509 __asm__ ("divs%.l %4,%1:%0" \
510 : "=d" ((USItype) (q)), \
511 "=d" ((USItype) (r)) \
512 : "0" ((USItype) (n0)), \
513 "1" ((USItype) (n1)), \
514 "dmi" ((USItype) (d)))
516 #else /* not mc68020 */
517 #if !defined(__mcf5200__)
518 /* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
519 #define umul_ppmm(xh, xl, a, b) \
520 __asm__ ("| Inlined umul_ppmm
546 : "=g" ((USItype) (xh)), \
547 "=g" ((USItype) (xl)) \
548 : "g" ((USItype) (a)), \
549 "g" ((USItype) (b)) \
550 : "d0", "d1", "d2", "d3", "d4")
551 #define UMUL_TIME 100
552 #define UDIV_TIME 400
553 #endif /* not mcf5200 */
554 #endif /* not mc68020 */
556 /* The '020, '030, '040 and '060 have bitfield insns. */
557 #if defined (__mc68020__) || defined(mc68020) \
558 || defined(__mc68030__) || defined(mc68030) \
559 || defined(__mc68040__) || defined(mc68040) \
560 || defined(__mc68060__) || defined(mc68060) \
562 #define count_leading_zeros(count, x) \
563 __asm__ ("bfffo %1{%b2:%b2},%0" \
564 : "=d" ((USItype) (count)) \
565 : "od" ((USItype) (x)), "n" (0))
569 #if defined (__m88000__)
570 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
571 __asm__ ("addu.co %1,%r4,%r5
572 addu.ci %0,%r2,%r3" \
573 : "=r" ((USItype) (sh)), \
574 "=&r" ((USItype) (sl)) \
575 : "%rJ" ((USItype) (ah)), \
576 "rJ" ((USItype) (bh)), \
577 "%rJ" ((USItype) (al)), \
578 "rJ" ((USItype) (bl)))
579 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
580 __asm__ ("subu.co %1,%r4,%r5
581 subu.ci %0,%r2,%r3" \
582 : "=r" ((USItype) (sh)), \
583 "=&r" ((USItype) (sl)) \
584 : "rJ" ((USItype) (ah)), \
585 "rJ" ((USItype) (bh)), \
586 "rJ" ((USItype) (al)), \
587 "rJ" ((USItype) (bl)))
588 #define count_leading_zeros(count, x) \
591 __asm__ ("ff1 %0,%1" \
593 : "r" ((USItype) (x))); \
594 (count) = __cbtmp ^ 31; \
596 #if defined (__mc88110__)
597 #define umul_ppmm(wh, wl, u, v) \
599 union {UDItype __ll; \
600 struct {USItype __h, __l;} __i; \
602 __asm__ ("mulu.d %0,%1,%2" \
604 : "r" ((USItype) (u)), \
605 "r" ((USItype) (v))); \
606 (wh) = __xx.__i.__h; \
607 (wl) = __xx.__i.__l; \
609 #define udiv_qrnnd(q, r, n1, n0, d) \
610 ({union {UDItype __ll; \
611 struct {USItype __h, __l;} __i; \
614 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
615 __asm__ ("divu.d %0,%1,%2" \
618 "r" ((USItype) (d))); \
619 (r) = (n0) - __q * (d); (q) = __q; })
624 #define UDIV_TIME 150
625 #endif /* __mc88110__ */
626 #endif /* __m88000__ */
628 #if defined (__mips__)
629 #define umul_ppmm(w1, w0, u, v) \
630 __asm__ ("multu %2,%3" \
631 : "=l" ((USItype) (w0)), \
632 "=h" ((USItype) (w1)) \
633 : "d" ((USItype) (u)), \
636 #define UDIV_TIME 100
637 #endif /* __mips__ */
639 #if defined (__ns32000__)
640 #define umul_ppmm(w1, w0, u, v) \
641 ({union {UDItype __ll; \
642 struct {USItype __l, __h;} __i; \
644 __asm__ ("meid %2,%0" \
646 : "%0" ((USItype) (u)), \
647 "g" ((USItype) (v))); \
648 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
649 #define __umulsidi3(u, v) \
651 __asm__ ("meid %2,%0" \
653 : "%0" ((USItype) (u)), \
654 "g" ((USItype) (v))); \
656 #define udiv_qrnnd(q, r, n1, n0, d) \
657 ({union {UDItype __ll; \
658 struct {USItype __l, __h;} __i; \
660 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
661 __asm__ ("deid %2,%0" \
664 "g" ((USItype) (d))); \
665 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
666 #endif /* __ns32000__ */
668 #if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
669 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
671 if (__builtin_constant_p (bh) && (bh) == 0) \
672 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
673 : "=r" ((USItype) (sh)), \
674 "=&r" ((USItype) (sl)) \
675 : "%r" ((USItype) (ah)), \
676 "%r" ((USItype) (al)), \
677 "rI" ((USItype) (bl))); \
678 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
679 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
680 : "=r" ((USItype) (sh)), \
681 "=&r" ((USItype) (sl)) \
682 : "%r" ((USItype) (ah)), \
683 "%r" ((USItype) (al)), \
684 "rI" ((USItype) (bl))); \
686 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
687 : "=r" ((USItype) (sh)), \
688 "=&r" ((USItype) (sl)) \
689 : "%r" ((USItype) (ah)), \
690 "r" ((USItype) (bh)), \
691 "%r" ((USItype) (al)), \
692 "rI" ((USItype) (bl))); \
694 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
696 if (__builtin_constant_p (ah) && (ah) == 0) \
697 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
698 : "=r" ((USItype) (sh)), \
699 "=&r" ((USItype) (sl)) \
700 : "r" ((USItype) (bh)), \
701 "rI" ((USItype) (al)), \
702 "r" ((USItype) (bl))); \
703 else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
704 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
705 : "=r" ((USItype) (sh)), \
706 "=&r" ((USItype) (sl)) \
707 : "r" ((USItype) (bh)), \
708 "rI" ((USItype) (al)), \
709 "r" ((USItype) (bl))); \
710 else if (__builtin_constant_p (bh) && (bh) == 0) \
711 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
712 : "=r" ((USItype) (sh)), \
713 "=&r" ((USItype) (sl)) \
714 : "r" ((USItype) (ah)), \
715 "rI" ((USItype) (al)), \
716 "r" ((USItype) (bl))); \
717 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
718 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
719 : "=r" ((USItype) (sh)), \
720 "=&r" ((USItype) (sl)) \
721 : "r" ((USItype) (ah)), \
722 "rI" ((USItype) (al)), \
723 "r" ((USItype) (bl))); \
725 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
726 : "=r" ((USItype) (sh)), \
727 "=&r" ((USItype) (sl)) \
728 : "r" ((USItype) (ah)), \
729 "r" ((USItype) (bh)), \
730 "rI" ((USItype) (al)), \
731 "r" ((USItype) (bl))); \
733 #define count_leading_zeros(count, x) \
734 __asm__ ("{cntlz|cntlzw} %0,%1" \
735 : "=r" ((USItype) (count)) \
736 : "r" ((USItype) (x)))
737 #if defined (_ARCH_PPC)
738 #define umul_ppmm(ph, pl, m0, m1) \
740 USItype __m0 = (m0), __m1 = (m1); \
741 __asm__ ("mulhwu %0,%1,%2" \
742 : "=r" ((USItype) ph) \
745 (pl) = __m0 * __m1; \
748 #define smul_ppmm(ph, pl, m0, m1) \
750 SItype __m0 = (m0), __m1 = (m1); \
751 __asm__ ("mulhw %0,%1,%2" \
752 : "=r" ((SItype) ph) \
755 (pl) = __m0 * __m1; \
758 #define UDIV_TIME 120
760 #define umul_ppmm(xh, xl, m0, m1) \
762 USItype __m0 = (m0), __m1 = (m1); \
763 __asm__ ("mul %0,%2,%3" \
764 : "=r" ((USItype) (xh)), \
765 "=q" ((USItype) (xl)) \
768 (xh) += ((((SItype) __m0 >> 31) & __m1) \
769 + (((SItype) __m1 >> 31) & __m0)); \
772 #define smul_ppmm(xh, xl, m0, m1) \
773 __asm__ ("mul %0,%2,%3" \
774 : "=r" ((SItype) (xh)), \
775 "=q" ((SItype) (xl)) \
779 #define sdiv_qrnnd(q, r, nh, nl, d) \
780 __asm__ ("div %0,%2,%4" \
781 : "=r" ((SItype) (q)), "=q" ((SItype) (r)) \
782 : "r" ((SItype) (nh)), "1" ((SItype) (nl)), "r" ((SItype) (d)))
783 #define UDIV_TIME 100
785 #endif /* Power architecture variants. */
787 #if defined (__pyr__)
788 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
791 : "=r" ((USItype) (sh)), \
792 "=&r" ((USItype) (sl)) \
793 : "%0" ((USItype) (ah)), \
794 "g" ((USItype) (bh)), \
795 "%1" ((USItype) (al)), \
796 "g" ((USItype) (bl)))
797 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
800 : "=r" ((USItype) (sh)), \
801 "=&r" ((USItype) (sl)) \
802 : "0" ((USItype) (ah)), \
803 "g" ((USItype) (bh)), \
804 "1" ((USItype) (al)), \
805 "g" ((USItype) (bl)))
806 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
807 #define umul_ppmm(w1, w0, u, v) \
808 ({union {UDItype __ll; \
809 struct {USItype __h, __l;} __i; \
811 __asm__ ("movw %1,%R0
813 : "=&r" (__xx.__ll) \
814 : "g" ((USItype) (u)), \
815 "g" ((USItype) (v))); \
816 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
819 #if defined (__ibm032__) /* RT/ROMP */
820 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
823 : "=r" ((USItype) (sh)), \
824 "=&r" ((USItype) (sl)) \
825 : "%0" ((USItype) (ah)), \
826 "r" ((USItype) (bh)), \
827 "%1" ((USItype) (al)), \
828 "r" ((USItype) (bl)))
829 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
832 : "=r" ((USItype) (sh)), \
833 "=&r" ((USItype) (sl)) \
834 : "0" ((USItype) (ah)), \
835 "r" ((USItype) (bh)), \
836 "1" ((USItype) (al)), \
837 "r" ((USItype) (bl)))
838 #define umul_ppmm(ph, pl, m0, m1) \
840 USItype __m0 = (m0), __m1 = (m1); \
862 : "=r" ((USItype) (ph)), \
863 "=r" ((USItype) (pl)) \
867 (ph) += ((((SItype) __m0 >> 31) & __m1) \
868 + (((SItype) __m1 >> 31) & __m0)); \
871 #define UDIV_TIME 200
872 #define count_leading_zeros(count, x) \
874 if ((x) >= 0x10000) \
875 __asm__ ("clz %0,%1" \
876 : "=r" ((USItype) (count)) \
877 : "r" ((USItype) (x) >> 16)); \
880 __asm__ ("clz %0,%1" \
881 : "=r" ((USItype) (count)) \
882 : "r" ((USItype) (x))); \
888 #if defined (__sparc__)
889 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
890 __asm__ ("addcc %r4,%5,%1
892 : "=r" ((USItype) (sh)), \
893 "=&r" ((USItype) (sl)) \
894 : "%rJ" ((USItype) (ah)), \
895 "rI" ((USItype) (bh)), \
896 "%rJ" ((USItype) (al)), \
897 "rI" ((USItype) (bl)) \
899 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
900 __asm__ ("subcc %r4,%5,%1
902 : "=r" ((USItype) (sh)), \
903 "=&r" ((USItype) (sl)) \
904 : "rJ" ((USItype) (ah)), \
905 "rI" ((USItype) (bh)), \
906 "rJ" ((USItype) (al)), \
907 "rI" ((USItype) (bl)) \
909 #if defined (__sparc_v8__)
910 #define umul_ppmm(w1, w0, u, v) \
911 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
912 : "=r" ((USItype) (w1)), \
913 "=r" ((USItype) (w0)) \
914 : "r" ((USItype) (u)), \
916 #define udiv_qrnnd(q, r, n1, n0, d) \
917 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
918 : "=&r" ((USItype) (q)), \
919 "=&r" ((USItype) (r)) \
920 : "r" ((USItype) (n1)), \
921 "r" ((USItype) (n0)), \
924 #if defined (__sparclite__)
925 /* This has hardware multiply but not divide. It also has two additional
926 instructions scan (ffs from high bit) and divscc. */
927 #define umul_ppmm(w1, w0, u, v) \
928 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
929 : "=r" ((USItype) (w1)), \
930 "=r" ((USItype) (w0)) \
931 : "r" ((USItype) (u)), \
933 #define udiv_qrnnd(q, r, n1, n0, d) \
934 __asm__ ("! Inlined udiv_qrnnd
935 wr %%g0,%2,%%y ! Not a delayed write for sparclite
972 1: ! End of inline udiv_qrnnd" \
973 : "=r" ((USItype) (q)), \
974 "=r" ((USItype) (r)) \
975 : "r" ((USItype) (n1)), \
976 "r" ((USItype) (n0)), \
977 "rI" ((USItype) (d)) \
978 : "%g1" __AND_CLOBBER_CC)
980 #define count_leading_zeros(count, x) \
981 __asm__ ("scan %1,0,%0" \
982 : "=r" ((USItype) (x)) \
983 : "r" ((USItype) (count)))
985 /* SPARC without integer multiplication and divide instructions.
986 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
987 #define umul_ppmm(w1, w0, u, v) \
988 __asm__ ("! Inlined umul_ppmm
989 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
990 sra %3,31,%%g2 ! Don't move this insn
991 and %2,%%g2,%%g2 ! Don't move this insn
992 andcc %%g0,0,%%g1 ! Don't move this insn
1028 : "=r" ((USItype) (w1)), \
1029 "=r" ((USItype) (w0)) \
1030 : "%rI" ((USItype) (u)), \
1031 "r" ((USItype) (v)) \
1032 : "%g1", "%g2" __AND_CLOBBER_CC)
1033 #define UMUL_TIME 39 /* 39 instructions */
1034 /* It's quite necessary to add this much assembler for the sparc.
1035 The default udiv_qrnnd (in C) is more than 10 times slower! */
1036 #define udiv_qrnnd(q, r, n1, n0, d) \
1037 __asm__ ("! Inlined udiv_qrnnd
1041 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
1042 sub %1,%2,%1 ! this kills msb of n
1043 addx %1,%1,%1 ! so this can't give carry
1048 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
1050 sub %1,%2,%1 ! this kills msb of n
1055 ! Got carry from n. Subtract next step to cancel this carry.
1057 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
1060 ! End of inline udiv_qrnnd" \
1061 : "=&r" ((USItype) (q)), \
1062 "=&r" ((USItype) (r)) \
1063 : "r" ((USItype) (d)), \
1064 "1" ((USItype) (n1)), \
1065 "0" ((USItype) (n0)) : "%g1" __AND_CLOBBER_CC)
1066 #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
1067 #endif /* __sparclite__ */
1068 #endif /* __sparc_v8__ */
1069 #endif /* __sparc__ */
1071 #if defined (__vax__)
1072 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1073 __asm__ ("addl2 %5,%1
1075 : "=g" ((USItype) (sh)), \
1076 "=&g" ((USItype) (sl)) \
1077 : "%0" ((USItype) (ah)), \
1078 "g" ((USItype) (bh)), \
1079 "%1" ((USItype) (al)), \
1080 "g" ((USItype) (bl)))
1081 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1082 __asm__ ("subl2 %5,%1
1084 : "=g" ((USItype) (sh)), \
1085 "=&g" ((USItype) (sl)) \
1086 : "0" ((USItype) (ah)), \
1087 "g" ((USItype) (bh)), \
1088 "1" ((USItype) (al)), \
1089 "g" ((USItype) (bl)))
1090 #define umul_ppmm(xh, xl, m0, m1) \
1094 struct {USItype __l, __h;} __i; \
1096 USItype __m0 = (m0), __m1 = (m1); \
1097 __asm__ ("emul %1,%2,$0,%0" \
1098 : "=r" (__xx.__ll) \
1101 (xh) = __xx.__i.__h; \
1102 (xl) = __xx.__i.__l; \
1103 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1104 + (((SItype) __m1 >> 31) & __m0)); \
1106 #define sdiv_qrnnd(q, r, n1, n0, d) \
1108 union {DItype __ll; \
1109 struct {SItype __l, __h;} __i; \
1111 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1112 __asm__ ("ediv %3,%2,%0,%1" \
1113 : "=g" (q), "=g" (r) \
1114 : "g" (__xx.__ll), "g" (d)); \
1116 #endif /* __vax__ */
1118 #endif /* __GNUC__ */
1120 /* If this machine has no inline assembler, use C macros. */
#if !defined (add_ssaaaa)
/* Generic C two-word add: low words first, then propagate the carry
   (__x < (al) detects unsigned wraparound) into the high-word sum.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x;							\
    __x = (al) + (bl);							\
    (sh) = (ah) + (bh) + (__x < (al));					\
    (sl) = __x;								\
  } while (0)
#endif
#if !defined (sub_ddmmss)
/* Generic C two-word subtract: low words first, then propagate the
   borrow (__x > (al) detects unsigned underflow) into the high words.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x;							\
    __x = (al) - (bl);							\
    (sh) = (ah) - (bh) - (__x > (al));					\
    (sl) = __x;								\
  } while (0)
#endif
#if !defined (umul_ppmm)
/* Generic C 32x32->64 multiply via four half-word products
   (schoolbook method); result delivered as high word W1, low word W0.  */
#define umul_ppmm(w1, w0, u, v) \
  do {									\
    USItype __x0, __x1, __x2, __x3;					\
    USItype __ul, __vl, __uh, __vh;					\
									\
    __ul = __ll_lowpart (u);						\
    __uh = __ll_highpart (u);						\
    __vl = __ll_lowpart (v);						\
    __vh = __ll_highpart (v);						\
									\
    __x0 = (USItype) __ul * __vl;					\
    __x1 = (USItype) __ul * __vh;					\
    __x2 = (USItype) __uh * __vl;					\
    __x3 = (USItype) __uh * __vh;					\
									\
    __x1 += __ll_highpart (__x0);/* this can't give carry */		\
    __x1 += __x2;		/* but this indeed can */		\
    if (__x1 < __x2)		/* did we get it? */			\
      __x3 += __ll_B;		/* yes, add it in the proper pos. */	\
									\
    (w1) = __x3 + __ll_highpart (__x1);					\
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);		\
  } while (0)
#endif
#if !defined (__umulsidi3)
/* Full 32x32->64 multiply returning a UDItype, built on umul_ppmm.
   NOTE(review): DIunion (with .ll / .s.high / .s.low) is assumed to be
   declared by the includer, as in libgcc2 -- confirm against caller.  */
#define __umulsidi3(u, v) \
  ({DIunion __w;							\
    umul_ppmm (__w.s.high, __w.s.low, u, v);				\
    __w.ll; })
#endif
/* Define this unconditionally, so it can be used for debugging.  */
/* Generic C 64/32 divide (Knuth TAOCP vol. 2, Algorithm D, base 2^16):
   divides N1:N0 by D, producing quotient Q and remainder R.
   Requires N1 < D, and D normalized (most significant bit set) --
   see UDIV_NEEDS_NORMALIZATION.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do {									\
    USItype __d1, __d0, __q1, __q0;					\
    USItype __r1, __r0, __m;						\
    __d1 = __ll_highpart (d);						\
    __d0 = __ll_lowpart (d);						\
									\
    __r1 = (n1) % __d1;							\
    __q1 = (n1) / __d1;							\
    __m = (USItype) __q1 * __d0;					\
    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
    if (__r1 < __m)							\
      {									\
	__q1--, __r1 += (d);						\
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m)						\
	    __q1--, __r1 += (d);					\
      }									\
    __r1 -= __m;							\
									\
    __r0 = __r1 % __d1;							\
    __q0 = __r1 / __d1;							\
    __m = (USItype) __q0 * __d0;					\
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
    if (__r0 < __m)							\
      {									\
	__q0--, __r0 += (d);						\
	if (__r0 >= (d))						\
	  if (__r0 < __m)						\
	    __q0--, __r0 += (d);					\
      }									\
    __r0 -= __m;							\
									\
    (q) = (USItype) __q1 * __ll_B | __q0;				\
    (r) = __r0;								\
  } while (0)
/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do {									\
    USItype __r;							\
    (q) = __udiv_w_sdiv (&__r, nh, nl, d);				\
    (r) = __r;								\
  } while (0)
#endif
/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.
   The C version requires the divisor to be normalized (msb set), hence
   UDIV_NEEDS_NORMALIZATION is 1 here.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
1230 #if !defined (count_leading_zeros)
1231 extern const UQItype __clz_tab
[];
1232 #define count_leading_zeros(count, x) \
1234 USItype __xr = (x); \
1237 if (SI_TYPE_SIZE <= 32) \
1239 __a = __xr < ((USItype)1<<2*__BITS4) \
1240 ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \
1241 : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
1245 for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
1246 if (((__xr >> __a) & 0xff) != 0) \
1250 (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
1254 #ifndef UDIV_NEEDS_NORMALIZATION
1255 #define UDIV_NEEDS_NORMALIZATION 0