1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2 Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004,
3 2005 Free Software Foundation, Inc.
5 This definition file is free software; you can redistribute it
6 and/or modify it under the terms of the GNU General Public
7 License as published by the Free Software Foundation; either
8 version 2, or (at your option) any later version.
10 This definition file is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied
12 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 See the GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 Boston, MA 02110-1301, USA. */
20 /* You have to define the following before including this file:
22 UWtype -- An unsigned type, default type for operations (typically a "word")
23 UHWtype -- An unsigned type, at least half the size of UWtype.
24 UDWtype -- An unsigned type, at least twice as large as UWtype.
25 W_TYPE_SIZE -- size in bits of UWtype
27 UQItype -- Unsigned 8 bit type.
28 SItype, USItype -- Signed and unsigned 32 bit types.
29 DItype, UDItype -- Signed and unsigned 64 bit types.
31 On a 32 bit machine UWtype should typically be USItype;
32 on a 64 bit machine, UWtype should typically be UDItype. */
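/* Illustrative sketch only (not part of the original interface note): a
   32-bit user of this file might supply the required types roughly like
   this, e.g. with GCC mode attributes, before including longlong.h; the
   exact definitions depend on the target and normally come from the
   including file (libgcc2.h, for instance):

	typedef unsigned int UQItype __attribute__ ((mode (QI)));
	typedef int SItype __attribute__ ((mode (SI)));
	typedef unsigned int USItype __attribute__ ((mode (SI)));
	typedef int DItype __attribute__ ((mode (DI)));
	typedef unsigned int UDItype __attribute__ ((mode (DI)));
	#define W_TYPE_SIZE 32  */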
34 #define __BITS4 (W_TYPE_SIZE / 4)
35 #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
36 #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
37 #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
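/* For example, with W_TYPE_SIZE == 32 the definitions above give
   __ll_B == 0x10000, __ll_lowpart (0x12345678) == 0x5678 and
   __ll_highpart (0x12345678) == 0x1234; the generic umul_ppmm and
   __udiv_qrnnd_c near the end of this file are built from such
   half-word pieces.  */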
39 #ifndef W_TYPE_SIZE
40 #define W_TYPE_SIZE 32
41 #define UWtype USItype
42 #define UHWtype USItype
43 #define UDWtype UDItype
44 #endif
46 extern const UQItype __clz_tab[256];
48 /* Define auxiliary asm macros.
50 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
51 UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
52 word product in HIGH_PROD and LOW_PROD.
54 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
55 UDWtype product. This is just a variant of umul_ppmm.
57 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
58 denominator) divides a UDWtype, composed by the UWtype integers
59 HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
60 in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
61 than DENOMINATOR for correct operation. If the macro in addition
62 requires the most significant bit of DENOMINATOR to be 1, then the
63 pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
65 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
66 denominator). Like udiv_qrnnd but the numbers are signed. The quotient is rounded towards 0.
69 5) count_leading_zeros(count, x) counts the number of zero-bits from the
70 msb to the first nonzero bit in the UWtype X. This is the number of
71 steps X needs to be shifted left to set the msb. Undefined for X == 0,
72 unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
74 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
75 from the least significant end.
77 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
78 high_addend_2, low_addend_2) adds two UWtype integers, composed by
79 HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
80 respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
81 (i.e. carry out) is not stored anywhere, and is lost.
83 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
84 high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
85 composed by HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
86 LOW_SUBTRAHEND respectively. The result is placed in HIGH_DIFFERENCE
87 and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
88 and is lost.
90 If any of these macros are left undefined for a particular CPU,
91 C macros are used. (A short illustrative usage sketch follows.) */
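/* A minimal usage sketch (illustrative only, not part of the original
   description), assuming W_TYPE_SIZE == 32 and the types listed at the
   top of the file:

	UWtype ph, pl, sh, sl, q, r;
	umul_ppmm (ph, pl, (UWtype) 0x9abcdef0, (UWtype) 0x12345678);
	add_ssaaaa (sh, sl, ph, pl, (UWtype) 0, (UWtype) 1);
	udiv_qrnnd (q, r, ph, pl, (UWtype) 0x87654321);

   The udiv_qrnnd call is valid because ph, the high product word, is less
   than the divisor; the divisor also has its most significant bit set, as
   required when UDIV_NEEDS_NORMALIZATION is 1.  */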
93 /* The CPUs come in alphabetical order below.
95 Please add support for more CPUs here, or improve the current support for the CPUs below!
97 (E.g. WE32100, IBM360.) */
99 #if defined (__GNUC__) && !defined (NO_ASM)
101 /* We sometimes need to clobber "cc" with gcc2, but that would not be
102 understood by gcc1. Use cpp to avoid major code duplication. */
103 #if __GNUC__ < 2
104 #define __CLOBBER_CC
105 #define __AND_CLOBBER_CC
106 #else /* __GNUC__ >= 2 */
107 #define __CLOBBER_CC : "cc"
108 #define __AND_CLOBBER_CC , "cc"
109 #endif /* __GNUC__ < 2 */
111 #if defined (__alpha) && W_TYPE_SIZE == 64
112 #define umul_ppmm(ph, pl, m0, m1) \
114 UDItype __m0 = (m0), __m1 = (m1); \
115 (ph) = __builtin_alpha_umulh (__m0, __m1); \
116 (pl) = __m0 * __m1; \
119 #ifndef LONGLONG_STANDALONE
120 #define udiv_qrnnd(q, r, n1, n0, d) \
122 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
125 extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
126 #define UDIV_TIME 220
127 #endif /* LONGLONG_STANDALONE */
129 #define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzl (X))
130 #define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X))
131 #define COUNT_LEADING_ZEROS_0 64
133 #define count_leading_zeros(COUNT,X) \
135 UDItype __xr = (X), __t, __a; \
136 __t = __builtin_alpha_cmpbge (0, __xr); \
137 __a = __clz_tab[__t ^ 0xff] - 1; \
138 __t = __builtin_alpha_extbl (__xr, __a); \
139 (COUNT) = 64 - (__clz_tab[__t] + __a*8); \
141 #define count_trailing_zeros(COUNT,X) \
143 UDItype __xr = (X), __t, __a; \
144 __t = __builtin_alpha_cmpbge (0, __xr); \
145 __t = ~__t & -~__t; \
146 __a = ((__t & 0xCC) != 0) * 2; \
147 __a += ((__t & 0xF0) != 0) * 4; \
148 __a += ((__t & 0xAA) != 0); \
149 __t = __builtin_alpha_extbl (__xr, __a); \
152 __a += ((__t & 0xCC) != 0) * 2; \
153 __a += ((__t & 0xF0) != 0) * 4; \
154 __a += ((__t & 0xAA) != 0); \
157 #endif /* __alpha_cix__ */
160 #if defined (__arc__) && W_TYPE_SIZE == 32
161 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
162 __asm__ ("add.f %1, %4, %5\n\tadc %0, %2, %3" \
163 : "=r" ((USItype) (sh)), \
164 "=&r" ((USItype) (sl)) \
165 : "%r" ((USItype) (ah)), \
166 "rIJ" ((USItype) (bh)), \
167 "%r" ((USItype) (al)), \
168 "rIJ" ((USItype) (bl)))
169 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
170 __asm__ ("sub.f %1, %4, %5\n\tsbc %0, %2, %3" \
171 : "=r" ((USItype) (sh)), \
172 "=&r" ((USItype) (sl)) \
173 : "r" ((USItype) (ah)), \
174 "rIJ" ((USItype) (bh)), \
175 "r" ((USItype) (al)), \
176 "rIJ" ((USItype) (bl)))
177 /* Call libgcc routine. */
178 #define umul_ppmm(w1, w0, u, v) \
181 __w.ll = __umulsidi3 (u, v); \
185 #define __umulsidi3 __umulsidi3
186 UDItype __umulsidi3 (USItype, USItype);
189 #if defined (__arm__) && !defined (__thumb__) && W_TYPE_SIZE == 32
190 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
191 __asm__ ("adds %1, %4, %5\n\tadc %0, %2, %3" \
192 : "=r" ((USItype) (sh)), \
193 "=&r" ((USItype) (sl)) \
194 : "%r" ((USItype) (ah)), \
195 "rI" ((USItype) (bh)), \
196 "%r" ((USItype) (al)), \
197 "rI" ((USItype) (bl)) __CLOBBER_CC)
198 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
199 __asm__ ("subs %1, %4, %5\n\tsbc %0, %2, %3" \
200 : "=r" ((USItype) (sh)), \
201 "=&r" ((USItype) (sl)) \
202 : "r" ((USItype) (ah)), \
203 "rI" ((USItype) (bh)), \
204 "r" ((USItype) (al)), \
205 "rI" ((USItype) (bl)) __CLOBBER_CC)
206 #define umul_ppmm(xh, xl, a, b) \
207 {register USItype __t0, __t1, __t2; \
208 __asm__ ("%@ Inlined umul_ppmm\n" \
209 " mov %2, %5, lsr #16\n" \
210 " mov %0, %6, lsr #16\n" \
211 " bic %3, %5, %2, lsl #16\n" \
212 " bic %4, %6, %0, lsl #16\n" \
213 " mul %1, %3, %4\n" \
214 " mul %4, %2, %4\n" \
215 " mul %3, %0, %3\n" \
216 " mul %0, %2, %0\n" \
217 " adds %3, %4, %3\n" \
218 " addcs %0, %0, #65536\n" \
219 " adds %1, %1, %3, lsl #16\n" \
220 " adc %0, %0, %3, lsr #16" \
221 : "=&r" ((USItype) (xh)), \
222 "=r" ((USItype) (xl)), \
223 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
224 : "r" ((USItype) (a)), \
225 "r" ((USItype) (b)) __CLOBBER_CC );}
227 #define UDIV_TIME 100
230 #if defined (__hppa) && W_TYPE_SIZE == 32
231 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
232 __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
233 : "=r" ((USItype) (sh)), \
234 "=&r" ((USItype) (sl)) \
235 : "%rM" ((USItype) (ah)), \
236 "rM" ((USItype) (bh)), \
237 "%rM" ((USItype) (al)), \
238 "rM" ((USItype) (bl)))
239 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
240 __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0" \
241 : "=r" ((USItype) (sh)), \
242 "=&r" ((USItype) (sl)) \
243 : "rM" ((USItype) (ah)), \
244 "rM" ((USItype) (bh)), \
245 "rM" ((USItype) (al)), \
246 "rM" ((USItype) (bl)))
247 #if defined (_PA_RISC1_1)
248 #define umul_ppmm(w1, w0, u, v) \
253 struct {USItype __w1, __w0;} __w1w0; \
255 __asm__ ("xmpyu %1,%2,%0" \
257 : "x" ((USItype) (u)), \
258 "x" ((USItype) (v))); \
259 (w1) = __t.__w1w0.__w1; \
260 (w0) = __t.__w1w0.__w0; \
267 #define count_leading_zeros(count, x) \
272 " extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
273 " extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n"\
274 " ldo 16(%0),%0 ; Yes. Perform add.\n" \
275 " extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
276 " extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n"\
277 " ldo 8(%0),%0 ; Yes. Perform add.\n" \
278 " extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
279 " extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n"\
280 " ldo 4(%0),%0 ; Yes. Perform add.\n" \
281 " extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
282 " extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n"\
283 " ldo 2(%0),%0 ; Yes. Perform add.\n" \
284 " extru %1,30,1,%1 ; Extract bit 1.\n" \
285 " sub %0,%1,%0 ; Subtract it.\n" \
286 : "=r" (count), "=r" (__tmp) : "1" (x)); \
290 #if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
291 #define smul_ppmm(xh, xl, m0, m1) \
293 union {DItype __ll; \
294 struct {USItype __h, __l;} __i; \
296 __asm__ ("lr %N0,%1\n\tmr %0,%2" \
298 : "r" (m0), "r" (m1)); \
299 (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
301 #define sdiv_qrnnd(q, r, n1, n0, d) \
303 union {DItype __ll; \
304 struct {USItype __h, __l;} __i; \
306 __x.__i.__h = n1; __x.__i.__l = n0; \
307 __asm__ ("dr %0,%2" \
309 : "0" (__x.__ll), "r" (d)); \
310 (q) = __x.__i.__l; (r) = __x.__i.__h; \
314 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
315 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
316 __asm__ ("addl %5,%1\n\tadcl %3,%0" \
317 : "=r" ((USItype) (sh)), \
318 "=&r" ((USItype) (sl)) \
319 : "%0" ((USItype) (ah)), \
320 "g" ((USItype) (bh)), \
321 "%1" ((USItype) (al)), \
322 "g" ((USItype) (bl)))
323 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
324 __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
325 : "=r" ((USItype) (sh)), \
326 "=&r" ((USItype) (sl)) \
327 : "0" ((USItype) (ah)), \
328 "g" ((USItype) (bh)), \
329 "1" ((USItype) (al)), \
330 "g" ((USItype) (bl)))
331 #define umul_ppmm(w1, w0, u, v) \
332 __asm__ ("mull %3" \
333 : "=a" ((USItype) (w0)), \
334 "=d" ((USItype) (w1)) \
335 : "%0" ((USItype) (u)), \
336 "rm" ((USItype) (v)))
337 #define udiv_qrnnd(q, r, n1, n0, dv) \
338 __asm__ ("divl %4" \
339 : "=a" ((USItype) (q)), \
340 "=d" ((USItype) (r)) \
341 : "0" ((USItype) (n0)), \
342 "1" ((USItype) (n1)), \
343 "rm" ((USItype) (dv)))
344 #define count_leading_zeros(count, x) \
345 do { \
346 USItype __cbtmp; \
347 __asm__ ("bsrl %1,%0" \
348 : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
349 (count) = __cbtmp ^ 31; \
350 } while (0)
351 #define count_trailing_zeros(count, x) \
352 __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
357 #if defined (__i960__) && W_TYPE_SIZE == 32
358 #define umul_ppmm(w1, w0, u, v) \
359 ({union {UDItype __ll; \
360 struct {USItype __l, __h;} __i; \
362 __asm__ ("emul %2,%1,%0" \
364 : "%dI" ((USItype) (u)), \
365 "dI" ((USItype) (v))); \
366 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
367 #define __umulsidi3(u, v) \
369 __asm__ ("emul %2,%1,%0" \
371 : "%dI" ((USItype) (u)), \
372 "dI" ((USItype) (v))); \
374 #endif /* __i960__ */
376 #if defined (__M32R__) && W_TYPE_SIZE == 32
377 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
378 /* The cmp clears the condition bit. */ \
379 __asm__ ("cmp %0,%0\n\taddx %1,%5\n\taddx %0,%3" \
380 : "=r" ((USItype) (sh)), \
381 "=&r" ((USItype) (sl)) \
382 : "0" ((USItype) (ah)), \
383 "r" ((USItype) (bh)), \
384 "1" ((USItype) (al)), \
385 "r" ((USItype) (bl)) \
387 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
388 /* The cmp clears the condition bit. */ \
389 __asm__ ("cmp %0,%0\n\tsubx %1,%5\n\tsubx %0,%3" \
390 : "=r" ((USItype) (sh)), \
391 "=&r" ((USItype) (sl)) \
392 : "0" ((USItype) (ah)), \
393 "r" ((USItype) (bh)), \
394 "1" ((USItype) (al)), \
395 "r" ((USItype) (bl)) \
397 #endif /* __M32R__ */
399 #if defined (__mc68000__) && W_TYPE_SIZE == 32
400 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
401 __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
402 : "=d" ((USItype) (sh)), \
403 "=&d" ((USItype) (sl)) \
404 : "%0" ((USItype) (ah)), \
405 "d" ((USItype) (bh)), \
406 "%1" ((USItype) (al)), \
407 "g" ((USItype) (bl)))
408 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
409 __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
410 : "=d" ((USItype) (sh)), \
411 "=&d" ((USItype) (sl)) \
412 : "0" ((USItype) (ah)), \
413 "d" ((USItype) (bh)), \
414 "1" ((USItype) (al)), \
415 "g" ((USItype) (bl)))
417 /* The '020, '030, '040, '060 and CPU32 have 32x32->64 and 64/32->32q-32r. */
418 #if (defined (__mc68020__) && !defined (__mc68060__))
419 #define umul_ppmm(w1, w0, u, v) \
420 __asm__ ("mulu%.l %3,%1:%0" \
421 : "=d" ((USItype) (w0)), \
422 "=d" ((USItype) (w1)) \
423 : "%0" ((USItype) (u)), \
424 "dmi" ((USItype) (v)))
426 #define udiv_qrnnd(q, r, n1, n0, d) \
427 __asm__ ("divu%.l %4,%1:%0" \
428 : "=d" ((USItype) (q)), \
429 "=d" ((USItype) (r)) \
430 : "0" ((USItype) (n0)), \
431 "1" ((USItype) (n1)), \
432 "dmi" ((USItype) (d)))
434 #define sdiv_qrnnd(q, r, n1, n0, d) \
435 __asm__ ("divs%.l %4,%1:%0" \
436 : "=d" ((USItype) (q)), \
437 "=d" ((USItype) (r)) \
438 : "0" ((USItype) (n0)), \
439 "1" ((USItype) (n1)), \
440 "dmi" ((USItype) (d)))
442 #elif defined (__mcoldfire__) /* not mc68020 */
444 #define umul_ppmm(xh, xl, a, b) \
445 __asm__ ("| Inlined umul_ppmm\n" \
446 " move%.l %2,%/d0\n" \
447 " move%.l %3,%/d1\n" \
448 " move%.l %/d0,%/d2\n" \
450 " move%.l %/d1,%/d3\n" \
452 " move%.w %/d2,%/d4\n" \
453 " mulu %/d3,%/d4\n" \
454 " mulu %/d1,%/d2\n" \
455 " mulu %/d0,%/d3\n" \
456 " mulu %/d0,%/d1\n" \
457 " move%.l %/d4,%/d0\n" \
460 " add%.l %/d0,%/d2\n" \
461 " add%.l %/d3,%/d2\n" \
463 " add%.l %#65536,%/d1\n" \
465 " moveq %#0,%/d0\n" \
466 " move%.w %/d2,%/d0\n" \
467 " move%.w %/d4,%/d2\n" \
468 " move%.l %/d2,%1\n" \
469 " add%.l %/d1,%/d0\n" \
471 : "=g" ((USItype) (xh)), \
472 "=g" ((USItype) (xl)) \
473 : "g" ((USItype) (a)), \
474 "g" ((USItype) (b)) \
475 : "d0", "d1", "d2", "d3", "d4")
476 #define UMUL_TIME 100
477 #define UDIV_TIME 400
478 #else /* not ColdFire */
479 /* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
480 #define umul_ppmm(xh, xl, a, b) \
481 __asm__ ("| Inlined umul_ppmm\n" \
482 " move%.l %2,%/d0\n" \
483 " move%.l %3,%/d1\n" \
484 " move%.l %/d0,%/d2\n" \
486 " move%.l %/d1,%/d3\n" \
488 " move%.w %/d2,%/d4\n" \
489 " mulu %/d3,%/d4\n" \
490 " mulu %/d1,%/d2\n" \
491 " mulu %/d0,%/d3\n" \
492 " mulu %/d0,%/d1\n" \
493 " move%.l %/d4,%/d0\n" \
494 " eor%.w %/d0,%/d0\n" \
496 " add%.l %/d0,%/d2\n" \
497 " add%.l %/d3,%/d2\n" \
499 " add%.l %#65536,%/d1\n" \
501 " moveq %#0,%/d0\n" \
502 " move%.w %/d2,%/d0\n" \
503 " move%.w %/d4,%/d2\n" \
504 " move%.l %/d2,%1\n" \
505 " add%.l %/d1,%/d0\n" \
507 : "=g" ((USItype) (xh)), \
508 "=g" ((USItype) (xl)) \
509 : "g" ((USItype) (a)), \
510 "g" ((USItype) (b)) \
511 : "d0", "d1", "d2", "d3", "d4")
512 #define UMUL_TIME 100
513 #define UDIV_TIME 400
515 #endif /* not mc68020 */
517 /* The '020, '030, '040 and '060 have bitfield insns.
518 cpu32 disguises as a 68020, but lacks them. */
519 #if defined (__mc68020__) && !defined (__mcpu32__)
520 #define count_leading_zeros(count, x) \
521 __asm__ ("bfffo %1{%b2:%b2},%0" \
522 : "=d" ((USItype) (count)) \
523 : "od" ((USItype) (x)), "n" (0))
527 #if defined (__m88000__) && W_TYPE_SIZE == 32
528 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
529 __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
530 : "=r" ((USItype) (sh)), \
531 "=&r" ((USItype) (sl)) \
532 : "%rJ" ((USItype) (ah)), \
533 "rJ" ((USItype) (bh)), \
534 "%rJ" ((USItype) (al)), \
535 "rJ" ((USItype) (bl)))
536 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
537 __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
538 : "=r" ((USItype) (sh)), \
539 "=&r" ((USItype) (sl)) \
540 : "rJ" ((USItype) (ah)), \
541 "rJ" ((USItype) (bh)), \
542 "rJ" ((USItype) (al)), \
543 "rJ" ((USItype) (bl)))
544 #define count_leading_zeros(count, x) \
547 __asm__ ("ff1 %0,%1" \
549 : "r" ((USItype) (x))); \
550 (count) = __cbtmp ^ 31; \
552 #define COUNT_LEADING_ZEROS_0 63 /* sic */
553 #if defined (__mc88110__)
554 #define umul_ppmm(wh, wl, u, v) \
556 union {UDItype __ll; \
557 struct {USItype __h, __l;} __i; \
559 __asm__ ("mulu.d %0,%1,%2" \
561 : "r" ((USItype) (u)), \
562 "r" ((USItype) (v))); \
563 (wh) = __xx.__i.__h; \
564 (wl) = __xx.__i.__l; \
566 #define udiv_qrnnd(q, r, n1, n0, d) \
567 ({union {UDItype __ll; \
568 struct {USItype __h, __l;} __i; \
571 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
572 __asm__ ("divu.d %0,%1,%2" \
575 "r" ((USItype) (d))); \
576 (r) = (n0) - __q * (d); (q) = __q; })
581 #define UDIV_TIME 150
582 #endif /* __mc88110__ */
583 #endif /* __m88000__ */
585 #if defined (__mips__) && W_TYPE_SIZE == 32
586 #define umul_ppmm(w1, w0, u, v) \
587 __asm__ ("multu %2,%3" \
588 : "=l" ((USItype) (w0)), \
589 "=h" ((USItype) (w1)) \
590 : "d" ((USItype) (u)), \
593 #define UDIV_TIME 100
594 #endif /* __mips__ */
596 #if defined (__ns32000__) && W_TYPE_SIZE == 32
597 #define umul_ppmm(w1, w0, u, v) \
598 ({union {UDItype __ll; \
599 struct {USItype __l, __h;} __i; \
601 __asm__ ("meid %2,%0" \
603 : "%0" ((USItype) (u)), \
604 "g" ((USItype) (v))); \
605 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
606 #define __umulsidi3(u, v) \
608 __asm__ ("meid %2,%0" \
610 : "%0" ((USItype) (u)), \
611 "g" ((USItype) (v))); \
613 #define udiv_qrnnd(q, r, n1, n0, d) \
614 ({union {UDItype __ll; \
615 struct {USItype __l, __h;} __i; \
617 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
618 __asm__ ("deid %2,%0" \
621 "g" ((USItype) (d))); \
622 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
623 #define count_trailing_zeros(count,x) \
625 __asm__ ("ffsd %2,%0" \
626 : "=r" ((USItype) (count)) \
627 : "0" ((USItype) 0), \
628 "r" ((USItype) (x))); \
630 #endif /* __ns32000__ */
632 /* FIXME: We should test _IBMR2 here when we add assembly support for the
633 system vendor compilers.
634 FIXME: What's needed for gcc PowerPC VxWorks? __vxworks__ is not good
635 enough, since that hits ARM and m68k too. */
636 #if (defined (_ARCH_PPC) /* AIX */ \
637 || defined (_ARCH_PWR) /* AIX */ \
638 || defined (_ARCH_COM) /* AIX */ \
639 || defined (__powerpc__) /* gcc */ \
640 || defined (__POWERPC__) /* BEOS */ \
641 || defined (__ppc__) /* Darwin */ \
642 || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \
643 || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \
644 && CPU_FAMILY == PPC) \
645 ) && W_TYPE_SIZE == 32
646 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
648 if (__builtin_constant_p (bh) && (bh) == 0) \
649 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
650 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
651 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
652 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
653 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
655 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
656 : "=r" (sh), "=&r" (sl) \
657 : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
659 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
661 if (__builtin_constant_p (ah) && (ah) == 0) \
662 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
663 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
664 else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
665 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
666 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
667 else if (__builtin_constant_p (bh) && (bh) == 0) \
668 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
669 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
670 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
671 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
672 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
674 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
675 : "=r" (sh), "=&r" (sl) \
676 : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
678 #define count_leading_zeros(count, x) \
679 __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
680 #define COUNT_LEADING_ZEROS_0 32
681 #if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \
682 || defined (__ppc__) \
683 || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \
684 || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \
685 && CPU_FAMILY == PPC)
686 #define umul_ppmm(ph, pl, m0, m1) \
688 USItype __m0 = (m0), __m1 = (m1); \
689 __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
690 (pl) = __m0 * __m1; \
693 #define smul_ppmm(ph, pl, m0, m1) \
695 SItype __m0 = (m0), __m1 = (m1); \
696 __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
697 (pl) = __m0 * __m1; \
700 #define UDIV_TIME 120
701 #elif defined (_ARCH_PWR)
703 #define smul_ppmm(xh, xl, m0, m1) \
704 __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
706 #define sdiv_qrnnd(q, r, nh, nl, d) \
707 __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
708 #define UDIV_TIME 100
710 #endif /* 32-bit POWER architecture variants. */
712 /* We should test _IBMR2 here when we add assembly support for the system vendor compilers. */
714 #if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64
715 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
717 if (__builtin_constant_p (bh) && (bh) == 0) \
718 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
719 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
720 else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
721 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
722 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
724 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
725 : "=r" (sh), "=&r" (sl) \
726 : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
728 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
730 if (__builtin_constant_p (ah) && (ah) == 0) \
731 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
732 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
733 else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
734 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
735 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
736 else if (__builtin_constant_p (bh) && (bh) == 0) \
737 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
738 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
739 else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
740 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
741 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
743 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
744 : "=r" (sh), "=&r" (sl) \
745 : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
747 #define count_leading_zeros(count, x) \
748 __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
749 #define COUNT_LEADING_ZEROS_0 64
750 #define umul_ppmm(ph, pl, m0, m1) \
752 UDItype __m0 = (m0), __m1 = (m1); \
753 __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
754 (pl) = __m0 * __m1; \
757 #define smul_ppmm(ph, pl, m0, m1) \
759 DItype __m0 = (m0), __m1 = (m1); \
760 __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
761 (pl) = __m0 * __m1; \
763 #define SMUL_TIME 14 /* ??? */
764 #define UDIV_TIME 120 /* ??? */
765 #endif /* 64-bit PowerPC. */
767 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
768 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
769 __asm__ ("a %1,%5\n\tae %0,%3" \
770 : "=r" ((USItype) (sh)), \
771 "=&r" ((USItype) (sl)) \
772 : "%0" ((USItype) (ah)), \
773 "r" ((USItype) (bh)), \
774 "%1" ((USItype) (al)), \
775 "r" ((USItype) (bl)))
776 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
777 __asm__ ("s %1,%5\n\tse %0,%3" \
778 : "=r" ((USItype) (sh)), \
779 "=&r" ((USItype) (sl)) \
780 : "0" ((USItype) (ah)), \
781 "r" ((USItype) (bh)), \
782 "1" ((USItype) (al)), \
783 "r" ((USItype) (bl)))
784 #define umul_ppmm(ph, pl, m0, m1) \
786 USItype __m0 = (m0), __m1 = (m1); \
808 : "=r" ((USItype) (ph)), \
809 "=r" ((USItype) (pl)) \
813 (ph) += ((((SItype) __m0 >> 31) & __m1) \
814 + (((SItype) __m1 >> 31) & __m0)); \
817 #define UDIV_TIME 200
818 #define count_leading_zeros(count, x) \
820 if ((x) >= 0x10000) \
821 __asm__ ("clz %0,%1" \
822 : "=r" ((USItype) (count)) \
823 : "r" ((USItype) (x) >> 16)); \
826 __asm__ ("clz %0,%1" \
827 : "=r" ((USItype) (count)) \
828 : "r" ((USItype) (x))); \
834 #if defined(__sh__) && !__SHMEDIA__ && W_TYPE_SIZE == 32
836 #define umul_ppmm(w1, w0, u, v) \
838 "dmulu.l %2,%3\n\tsts%M1 macl,%1\n\tsts%M0 mach,%0" \
839 : "=r<" ((USItype)(w1)), \
840 "=r<" ((USItype)(w0)) \
841 : "r" ((USItype)(u)), \
847 /* This is the same algorithm as __udiv_qrnnd_c. */
848 #define UDIV_NEEDS_NORMALIZATION 1
850 #define udiv_qrnnd(q, r, n1, n0, d) \
852 extern UWtype __udiv_qrnnd_16 (UWtype, UWtype) \
853 __attribute__ ((visibility ("hidden"))); \
854 /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ \
865 : "=r" (q), "=&z" (r) \
866 : "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16) \
867 : "r1", "r2", "r4", "r5", "r6", "pr"); \
872 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
873 __asm__ ("clrt;subc %5,%1; subc %4,%0" \
874 : "=r" (sh), "=r" (sl) \
875 : "0" (ah), "1" (al), "r" (bh), "r" (bl))
879 #if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
880 #define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
881 #define count_leading_zeros(count, x) \
884 UDItype x_ = (USItype)(x); \
887 __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_)); \
891 #define COUNT_LEADING_ZEROS_0 32
894 #if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
896 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
897 __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
898 : "=r" ((USItype) (sh)), \
899 "=&r" ((USItype) (sl)) \
900 : "%rJ" ((USItype) (ah)), \
901 "rI" ((USItype) (bh)), \
902 "%rJ" ((USItype) (al)), \
903 "rI" ((USItype) (bl)) \
905 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
906 __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
907 : "=r" ((USItype) (sh)), \
908 "=&r" ((USItype) (sl)) \
909 : "rJ" ((USItype) (ah)), \
910 "rI" ((USItype) (bh)), \
911 "rJ" ((USItype) (al)), \
912 "rI" ((USItype) (bl)) \
914 #if defined (__sparc_v8__)
915 #define umul_ppmm(w1, w0, u, v) \
916 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
917 : "=r" ((USItype) (w1)), \
918 "=r" ((USItype) (w0)) \
919 : "r" ((USItype) (u)), \
921 #define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
922 __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
923 : "=&r" ((USItype) (__q)), \
924 "=&r" ((USItype) (__r)) \
925 : "r" ((USItype) (__n1)), \
926 "r" ((USItype) (__n0)), \
927 "r" ((USItype) (__d)))
929 #if defined (__sparclite__)
930 /* This has hardware multiply but not divide. It also has two additional
931 instructions scan (ffs from high bit) and divscc. */
932 #define umul_ppmm(w1, w0, u, v) \
933 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
934 : "=r" ((USItype) (w1)), \
935 "=r" ((USItype) (w0)) \
936 : "r" ((USItype) (u)), \
938 #define udiv_qrnnd(q, r, n1, n0, d) \
939 __asm__ ("! Inlined udiv_qrnnd\n" \
940 " wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
942 " divscc %3,%4,%%g1\n" \
943 " divscc %%g1,%4,%%g1\n" \
944 " divscc %%g1,%4,%%g1\n" \
945 " divscc %%g1,%4,%%g1\n" \
946 " divscc %%g1,%4,%%g1\n" \
947 " divscc %%g1,%4,%%g1\n" \
948 " divscc %%g1,%4,%%g1\n" \
949 " divscc %%g1,%4,%%g1\n" \
950 " divscc %%g1,%4,%%g1\n" \
951 " divscc %%g1,%4,%%g1\n" \
952 " divscc %%g1,%4,%%g1\n" \
953 " divscc %%g1,%4,%%g1\n" \
954 " divscc %%g1,%4,%%g1\n" \
955 " divscc %%g1,%4,%%g1\n" \
956 " divscc %%g1,%4,%%g1\n" \
957 " divscc %%g1,%4,%%g1\n" \
958 " divscc %%g1,%4,%%g1\n" \
959 " divscc %%g1,%4,%%g1\n" \
960 " divscc %%g1,%4,%%g1\n" \
961 " divscc %%g1,%4,%%g1\n" \
962 " divscc %%g1,%4,%%g1\n" \
963 " divscc %%g1,%4,%%g1\n" \
964 " divscc %%g1,%4,%%g1\n" \
965 " divscc %%g1,%4,%%g1\n" \
966 " divscc %%g1,%4,%%g1\n" \
967 " divscc %%g1,%4,%%g1\n" \
968 " divscc %%g1,%4,%%g1\n" \
969 " divscc %%g1,%4,%%g1\n" \
970 " divscc %%g1,%4,%%g1\n" \
971 " divscc %%g1,%4,%%g1\n" \
972 " divscc %%g1,%4,%%g1\n" \
973 " divscc %%g1,%4,%0\n" \
977 "1: ! End of inline udiv_qrnnd" \
978 : "=r" ((USItype) (q)), \
979 "=r" ((USItype) (r)) \
980 : "r" ((USItype) (n1)), \
981 "r" ((USItype) (n0)), \
982 "rI" ((USItype) (d)) \
983 : "g1" __AND_CLOBBER_CC)
985 #define count_leading_zeros(count, x) \
987 __asm__ ("scan %1,1,%0" \
988 : "=r" ((USItype) (count)) \
989 : "r" ((USItype) (x))); \
991 /* Early sparclites return 63 for an argument of 0, but they warn that future
992 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0 undefined. */
995 /* SPARC without integer multiplication and divide instructions.
996 (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
997 #define umul_ppmm(w1, w0, u, v) \
998 __asm__ ("! Inlined umul_ppmm\n" \
999 " wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n"\
1000 " sra %3,31,%%o5 ! Don't move this insn\n" \
1001 " and %2,%%o5,%%o5 ! Don't move this insn\n" \
1002 " andcc %%g0,0,%%g1 ! Don't move this insn\n" \
1003 " mulscc %%g1,%3,%%g1\n" \
1004 " mulscc %%g1,%3,%%g1\n" \
1005 " mulscc %%g1,%3,%%g1\n" \
1006 " mulscc %%g1,%3,%%g1\n" \
1007 " mulscc %%g1,%3,%%g1\n" \
1008 " mulscc %%g1,%3,%%g1\n" \
1009 " mulscc %%g1,%3,%%g1\n" \
1010 " mulscc %%g1,%3,%%g1\n" \
1011 " mulscc %%g1,%3,%%g1\n" \
1012 " mulscc %%g1,%3,%%g1\n" \
1013 " mulscc %%g1,%3,%%g1\n" \
1014 " mulscc %%g1,%3,%%g1\n" \
1015 " mulscc %%g1,%3,%%g1\n" \
1016 " mulscc %%g1,%3,%%g1\n" \
1017 " mulscc %%g1,%3,%%g1\n" \
1018 " mulscc %%g1,%3,%%g1\n" \
1019 " mulscc %%g1,%3,%%g1\n" \
1020 " mulscc %%g1,%3,%%g1\n" \
1021 " mulscc %%g1,%3,%%g1\n" \
1022 " mulscc %%g1,%3,%%g1\n" \
1023 " mulscc %%g1,%3,%%g1\n" \
1024 " mulscc %%g1,%3,%%g1\n" \
1025 " mulscc %%g1,%3,%%g1\n" \
1026 " mulscc %%g1,%3,%%g1\n" \
1027 " mulscc %%g1,%3,%%g1\n" \
1028 " mulscc %%g1,%3,%%g1\n" \
1029 " mulscc %%g1,%3,%%g1\n" \
1030 " mulscc %%g1,%3,%%g1\n" \
1031 " mulscc %%g1,%3,%%g1\n" \
1032 " mulscc %%g1,%3,%%g1\n" \
1033 " mulscc %%g1,%3,%%g1\n" \
1034 " mulscc %%g1,%3,%%g1\n" \
1035 " mulscc %%g1,0,%%g1\n" \
1036 " add %%g1,%%o5,%0\n" \
1038 : "=r" ((USItype) (w1)), \
1039 "=r" ((USItype) (w0)) \
1040 : "%rI" ((USItype) (u)), \
1041 "r" ((USItype) (v)) \
1042 : "g1", "o5" __AND_CLOBBER_CC)
1043 #define UMUL_TIME 39 /* 39 instructions */
1044 /* It's quite necessary to add this much assembler for the sparc.
1045 The default udiv_qrnnd (in C) is more than 10 times slower! */
1046 #define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
1047 __asm__ ("! Inlined udiv_qrnnd\n" \
1049 " subcc %1,%2,%%g0\n" \
1051 " addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
1052 " sub %1,%2,%1 ! this kills msb of n\n" \
1053 " addx %1,%1,%1 ! so this can't give carry\n" \
1054 " subcc %%g1,1,%%g1\n" \
1056 " subcc %1,%2,%%g0\n" \
1058 " addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
1060 " sub %1,%2,%1 ! this kills msb of n\n" \
1061 "4: sub %1,%2,%1\n" \
1062 "5: addxcc %1,%1,%1\n" \
1064 " subcc %%g1,1,%%g1\n" \
1065 "! Got carry from n. Subtract next step to cancel this carry.\n" \
1067 " addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n" \
1069 "3: xnor %0,0,%0\n" \
1070 " ! End of inline udiv_qrnnd" \
1071 : "=&r" ((USItype) (__q)), \
1072 "=&r" ((USItype) (__r)) \
1073 : "r" ((USItype) (__d)), \
1074 "1" ((USItype) (__n1)), \
1075 "0" ((USItype) (__n0)) : "g1" __AND_CLOBBER_CC)
1076 #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
1077 #endif /* __sparclite__ */
1078 #endif /* __sparc_v8__ */
1079 #endif /* sparc32 */
1081 #if ((defined (__sparc__) && defined (__arch64__)) || defined (__sparcv9)) \
1082 && W_TYPE_SIZE == 64
1083 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1084 __asm__ ("addcc %r4,%5,%1\n\t" \
1085 "add %r2,%3,%0\n\t" \
1086 "bcs,a,pn %%xcc, 1f\n\t" \
1089 : "=r" ((UDItype)(sh)), \
1090 "=&r" ((UDItype)(sl)) \
1091 : "%rJ" ((UDItype)(ah)), \
1092 "rI" ((UDItype)(bh)), \
1093 "%rJ" ((UDItype)(al)), \
1094 "rI" ((UDItype)(bl)) \
1097 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1098 __asm__ ("subcc %r4,%5,%1\n\t" \
1099 "sub %r2,%3,%0\n\t" \
1100 "bcs,a,pn %%xcc, 1f\n\t" \
1101 "sub %0, 1, %0\n\t" \
1103 : "=r" ((UDItype)(sh)), \
1104 "=&r" ((UDItype)(sl)) \
1105 : "rJ" ((UDItype)(ah)), \
1106 "rI" ((UDItype)(bh)), \
1107 "rJ" ((UDItype)(al)), \
1108 "rI" ((UDItype)(bl)) \
1111 #define umul_ppmm(wh, wl, u, v) \
1113 UDItype tmp1, tmp2, tmp3, tmp4; \
1114 __asm__ __volatile__ ( \
1116 "mulx %3,%6,%1\n\t" \
1117 "srlx %6,32,%2\n\t" \
1118 "mulx %2,%3,%4\n\t" \
1119 "sllx %4,32,%5\n\t" \
1121 "sub %1,%5,%5\n\t" \
1122 "srlx %5,32,%5\n\t" \
1123 "addcc %4,%5,%4\n\t" \
1124 "srlx %7,32,%5\n\t" \
1125 "mulx %3,%5,%3\n\t" \
1126 "mulx %2,%5,%5\n\t" \
1127 "sethi %%hi(0x80000000),%2\n\t" \
1128 "addcc %4,%3,%4\n\t" \
1129 "srlx %4,32,%4\n\t" \
1130 "add %2,%2,%2\n\t" \
1131 "movcc %%xcc,%%g0,%2\n\t" \
1132 "addcc %5,%4,%5\n\t" \
1133 "sllx %3,32,%3\n\t" \
1134 "add %1,%3,%1\n\t" \
1136 : "=r" ((UDItype)(wh)), \
1137 "=&r" ((UDItype)(wl)), \
1138 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
1139 : "r" ((UDItype)(u)), \
1140 "r" ((UDItype)(v)) \
1143 #define UMUL_TIME 96
1144 #define UDIV_TIME 230
1145 #endif /* sparc64 */
1147 #if defined (__vax__) && W_TYPE_SIZE == 32
1148 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1149 __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
1150 : "=g" ((USItype) (sh)), \
1151 "=&g" ((USItype) (sl)) \
1152 : "%0" ((USItype) (ah)), \
1153 "g" ((USItype) (bh)), \
1154 "%1" ((USItype) (al)), \
1155 "g" ((USItype) (bl)))
1156 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1157 __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
1158 : "=g" ((USItype) (sh)), \
1159 "=&g" ((USItype) (sl)) \
1160 : "0" ((USItype) (ah)), \
1161 "g" ((USItype) (bh)), \
1162 "1" ((USItype) (al)), \
1163 "g" ((USItype) (bl)))
1164 #define umul_ppmm(xh, xl, m0, m1) \
1168 struct {USItype __l, __h;} __i; \
1170 USItype __m0 = (m0), __m1 = (m1); \
1171 __asm__ ("emul %1,%2,$0,%0" \
1172 : "=r" (__xx.__ll) \
1175 (xh) = __xx.__i.__h; \
1176 (xl) = __xx.__i.__l; \
1177 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1178 + (((SItype) __m1 >> 31) & __m0)); \
1180 #define sdiv_qrnnd(q, r, n1, n0, d) \
1182 union {DItype __ll; \
1183 struct {SItype __l, __h;} __i; \
1185 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1186 __asm__ ("ediv %3,%2,%0,%1" \
1187 : "=g" (q), "=g" (r) \
1188 : "g" (__xx.__ll), "g" (d)); \
1190 #endif /* __vax__ */
1192 #if defined (__z8000__) && W_TYPE_SIZE == 16
1193 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1194 __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
1195 : "=r" ((unsigned int)(sh)), \
1196 "=&r" ((unsigned int)(sl)) \
1197 : "%0" ((unsigned int)(ah)), \
1198 "r" ((unsigned int)(bh)), \
1199 "%1" ((unsigned int)(al)), \
1200 "rQR" ((unsigned int)(bl)))
1201 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1202 __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
1203 : "=r" ((unsigned int)(sh)), \
1204 "=&r" ((unsigned int)(sl)) \
1205 : "0" ((unsigned int)(ah)), \
1206 "r" ((unsigned int)(bh)), \
1207 "1" ((unsigned int)(al)), \
1208 "rQR" ((unsigned int)(bl)))
1209 #define umul_ppmm(xh, xl, m0, m1) \
1211 union {long int __ll; \
1212 struct {unsigned int __h, __l;} __i; \
1214 unsigned int __m0 = (m0), __m1 = (m1); \
1215 __asm__ ("mult %S0,%H3" \
1216 : "=r" (__xx.__i.__h), \
1217 "=r" (__xx.__i.__l) \
1220 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
1221 (xh) += ((((signed int) __m0 >> 15) & __m1) \
1222 + (((signed int) __m1 >> 15) & __m0)); \
1224 #endif /* __z8000__ */
1226 #endif /* __GNUC__ */
1228 /* If this machine has no inline assembler, use C macros. */
1230 #if !defined (add_ssaaaa)
1231 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1232 do { \
1233 UWtype __x; \
1234 __x = (al) + (bl); \
1235 (sh) = (ah) + (bh) + (__x < (al)); \
1236 (sl) = __x; \
1237 } while (0)
1238 #endif
1240 #if !defined (sub_ddmmss)
1241 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1242 do { \
1243 UWtype __x; \
1244 __x = (al) - (bl); \
1245 (sh) = (ah) - (bh) - (__x > (al)); \
1246 (sl) = __x; \
1247 } while (0)
1248 #endif
1250 /* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
1251 smul_ppmm. */
1252 #if !defined (umul_ppmm) && defined (smul_ppmm)
1253 #define umul_ppmm(w1, w0, u, v) \
1254 do { \
1255 UWtype __w1; \
1256 UWtype __xm0 = (u), __xm1 = (v); \
1257 smul_ppmm (__w1, w0, __xm0, __xm1); \
1258 (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
1259 + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
1260 } while (0)
1261 #endif
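/* The two masked terms above convert the signed high word into the
   unsigned one: reinterpreting an operand whose sign bit is set as
   unsigned raises its value by 2**W_TYPE_SIZE, which raises the
   double-word product by the other operand in the high word.  The mask
   -(x >> (W_TYPE_SIZE - 1)) is all ones exactly when the sign bit of x
   is set.  */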
1263 /* If we still don't have umul_ppmm, define it using plain C. */
1264 #if !defined (umul_ppmm)
1265 #define umul_ppmm(w1, w0, u, v) \
1266 do { \
1267 UWtype __x0, __x1, __x2, __x3; \
1268 UHWtype __ul, __vl, __uh, __vh; \
1270 __ul = __ll_lowpart (u); \
1271 __uh = __ll_highpart (u); \
1272 __vl = __ll_lowpart (v); \
1273 __vh = __ll_highpart (v); \
1275 __x0 = (UWtype) __ul * __vl; \
1276 __x1 = (UWtype) __ul * __vh; \
1277 __x2 = (UWtype) __uh * __vl; \
1278 __x3 = (UWtype) __uh * __vh; \
1280 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
1281 __x1 += __x2; /* but this indeed can */ \
1282 if (__x1 < __x2) /* did we get it? */ \
1283 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
1285 (w1) = __x3 + __ll_highpart (__x1); \
1286 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
1287 } while (0)
1288 #endif
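/* The scheme above is the usual schoolbook method: writing u = uh*B + ul
   and v = vh*B + vl with B = 2**(W_TYPE_SIZE/2), the product is
   uh*vh*B*B + (uh*vl + ul*vh)*B + ul*vl.  Each partial product fits in a
   UWtype, and the only carry that must be propagated by hand is the one
   from summing the two middle terms, caught by the __x1 < __x2 test.  */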
1290 #if !defined (__umulsidi3)
1291 #define __umulsidi3(u, v) \
1292 ({DWunion __w; \
1293 umul_ppmm (__w.s.high, __w.s.low, u, v); \
1294 __w.ll; })
1295 #endif
1297 /* Define this unconditionally, so it can be used for debugging. */
1298 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
1299 do { \
1300 UWtype __d1, __d0, __q1, __q0; \
1301 UWtype __r1, __r0, __m; \
1302 __d1 = __ll_highpart (d); \
1303 __d0 = __ll_lowpart (d); \
1305 __r1 = (n1) % __d1; \
1306 __q1 = (n1) / __d1; \
1307 __m = (UWtype) __q1 * __d0; \
1308 __r1 = __r1 * __ll_B | __ll_highpart (n0); \
1309 if (__r1 < __m) \
1310 { \
1311 __q1--, __r1 += (d); \
1312 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
1313 if (__r1 < __m) \
1314 __q1--, __r1 += (d); \
1315 } \
1316 __r1 -= __m; \
1318 __r0 = __r1 % __d1; \
1319 __q0 = __r1 / __d1; \
1320 __m = (UWtype) __q0 * __d0; \
1321 __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
1322 if (__r0 < __m) \
1323 { \
1324 __q0--, __r0 += (d); \
1325 if (__r0 >= (d)) \
1326 if (__r0 < __m) \
1327 __q0--, __r0 += (d); \
1328 } \
1329 __r0 -= __m; \
1331 (q) = (UWtype) __q1 * __ll_B | __q0; \
1332 (r) = __r0; \
1333 } while (0)
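/* __udiv_qrnnd_c divides the two-word value n1:n0 by d in two half-word
   steps: each step estimates a quotient half-word from the high half of
   the divisor, then corrects the estimate downward at most twice (the
   __q1--/__q0-- branches) before forming the partial remainder.  The
   estimates are only that tight when d is normalized, i.e. has its most
   significant bit set, which is why UDIV_NEEDS_NORMALIZATION is defined
   to 1 below when this fallback is used.  */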
1335 /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
1336 __udiv_w_sdiv (defined in libgcc or elsewhere). */
1337 #if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
1338 #define udiv_qrnnd(q, r, nh, nl, d) \
1341 (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
1346 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
1347 #if !defined (udiv_qrnnd)
1348 #define UDIV_NEEDS_NORMALIZATION 1
1349 #define udiv_qrnnd __udiv_qrnnd_c
1350 #endif
1352 #if !defined (count_leading_zeros)
1353 #define count_leading_zeros(count, x) \
1354 do { \
1355 UWtype __xr = (x); \
1356 UWtype __a; \
1358 if (W_TYPE_SIZE <= 32) \
1359 { \
1360 __a = __xr < ((UWtype)1<<2*__BITS4) \
1361 ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \
1362 : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
1363 } \
1364 else \
1365 { \
1366 for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
1367 if (((__xr >> __a) & 0xff) != 0) \
1368 break; \
1369 } \
1371 (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
1372 } while (0)
1373 #define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
1374 #endif
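/* Example (illustrative): with W_TYPE_SIZE == 32 the C version above
   sets count to 31 for x == 1 and to 0 for x == 0x80000000.  __a ends up
   holding the bit offset of the highest nonzero byte and __clz_tab
   supplies the count within that byte; for x == 0 the result is
   W_TYPE_SIZE, which is what COUNT_LEADING_ZEROS_0 advertises.  */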
1376 #if !defined (count_trailing_zeros)
1377 /* Define count_trailing_zeros using count_leading_zeros. The latter might be
1378 defined in asm, but if it is not, the C version above is good enough. */
1379 #define count_trailing_zeros(count, x) \
1380 do { \
1381 UWtype __ctz_x = (x); \
1382 UWtype __ctz_c; \
1383 count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
1384 (count) = W_TYPE_SIZE - 1 - __ctz_c; \
1385 } while (0)
1386 #endif
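/* __ctz_x & -__ctz_x isolates the lowest set bit of x, so its
   leading-zero count determines the trailing-zero count directly.  For
   example, with W_TYPE_SIZE == 32 and x == 0x18, x & -x is 0x08,
   count_leading_zeros yields 28, and count becomes 32 - 1 - 28 == 3.
   As with count_leading_zeros, the result for x == 0 is undefined.  */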
1388 #ifndef UDIV_NEEDS_NORMALIZATION
1389 #define UDIV_NEEDS_NORMALIZATION 0
1390 #endif
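/* Usage sketch (illustrative only): when UDIV_NEEDS_NORMALIZATION is 1,
   a caller shifts the divisor and the dividend left until the divisor's
   most significant bit is set before calling udiv_qrnnd, and shifts the
   remainder back afterwards.  Given dividend words n1 (high) and n0 (low)
   and divisor d, all UWtype, with n1 < d, roughly:

	UWtype __norm, __q, __r;
	count_leading_zeros (__norm, d);
	if (__norm != 0)
	  {
	    d <<= __norm;
	    n1 = (n1 << __norm) | (n0 >> (W_TYPE_SIZE - __norm));
	    n0 <<= __norm;
	  }
	udiv_qrnnd (__q, __r, n1, n0, d);
	__r >>= __norm;

   This mirrors the normalization done by the double-word division helpers
   in libgcc (e.g. __udivmoddi4 in libgcc2.c).  */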