/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef SI_TYPE_SIZE
#define SI_TYPE_SIZE 32
#endif

#define __BITS4 (SI_TYPE_SIZE / 4)
#define __ll_B (1L << (SI_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
#define __ll_highpart(t) ((USItype) (t) / __ll_B)
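/* Worked example (added for exposition): with SI_TYPE_SIZE == 32,
   __ll_B is 2^16, so for t = 0x12345678 we get
   __ll_highpart (t) == 0x1234 and __ll_lowpart (t) == 0x5678.
   Every USItype value t therefore satisfies
   t == __ll_highpart (t) * __ll_B + __ll_lowpart (t).  */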
/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
   and generates a two-part USItype product in HIGH_PROD and
   LOW_PROD.

   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
   and returns a UDItype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a two-word unsigned integer, composed of the
   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
   places the quotient in QUOTIENT and the remainder in REMAINDER.
   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
   If, in addition, the macro requires the most significant bit of
   DENOMINATOR to be 1, then the pre-processor symbol
   UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The
   quotient is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from
   the msb to the first non-zero bit.  This is the number of steps X
   needs to be shifted left to set the msb.  Undefined for X == 0.

   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word unsigned integers,
   composed of HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
   lost.

   7) sub_ddmmss(high_difference, low_difference, high_minuend,
   low_minuend, high_subtrahend, low_subtrahend) subtracts two
   two-word unsigned integers, composed of HIGH_MINUEND and
   LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
   respectively.  The result is placed in HIGH_DIFFERENCE and
   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  */

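/* Illustrative usage sketch (added for exposition; not part of the
   original interface).  The macros assign through their first
   arguments, so plain lvalues must be passed there; USItype is
   whatever single-word type the including file has defined:

     USItype ph, pl, q, r, c;
     umul_ppmm (ph, pl, (USItype) 0x9000, (USItype) 0x12345);
     count_leading_zeros (c, pl);
     udiv_qrnnd (q, r, ph, pl, (USItype) 0x80000001);

   count_leading_zeros requires a nonzero argument, and udiv_qrnnd
   requires ph < d; when UDIV_NEEDS_NORMALIZATION is 1 the divisor
   must in addition have its most significant bit set, as 0x80000001
   does.  */
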
/* The CPUs come in alphabetical order below.

   Please add support for more CPUs here, or improve the current support
   for the CPUs below!
   (E.g. WE32100, IBM360.)  */

#if defined (__GNUC__) && !defined (NO_ASM)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

#if defined (__a29k__) || defined (_AM29K)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5
    addc %0,%2,%3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%r" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "%r" ((USItype) (al)), \
             "rI" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5
    subc %0,%2,%3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "r" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "r" ((USItype) (al)), \
             "rI" ((USItype) (bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("multiplu %0,%1,%2" \
             : "=r" ((USItype) (xl)) \
             : "r" (__m0), \
               "r" (__m1)); \
    __asm__ ("multmu %0,%1,%2" \
             : "=r" ((USItype) (xh)) \
             : "r" (__m0), \
               "r" (__m1)); \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4" \
           : "=r" ((USItype) (q)), \
             "=q" ((USItype) (r)) \
           : "1" ((USItype) (n1)), \
             "r" ((USItype) (n0)), \
             "r" ((USItype) (d)))
#define count_leading_zeros(count, x) \
  __asm__ ("clz %0,%1" \
           : "=r" ((USItype) (count)) \
           : "r" ((USItype) (x)))
#endif /* __a29k__ */

#if defined (__arc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.f %1, %4, %5
    adc %0, %2, %3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%r" ((USItype) (ah)), \
             "rIJ" ((USItype) (bh)), \
             "%r" ((USItype) (al)), \
             "rIJ" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.f %1, %4, %5
    sbc %0, %2, %3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "r" ((USItype) (ah)), \
             "rIJ" ((USItype) (bh)), \
             "r" ((USItype) (al)), \
             "rIJ" ((USItype) (bl)))
/* Call libgcc1 routine.  */
#define umul_ppmm(w1, w0, u, v) \
  do { \
    DIunion __w; \
    __w.ll = __umulsidi3 (u, v); \
    w1 = __w.s.high; \
    w0 = __w.s.low; \
  } while (0)
#define __umulsidi3 __umulsidi3
UDItype __umulsidi3 (USItype, USItype);
#endif

#if defined (__arm__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1, %4, %5
    adc %0, %2, %3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%r" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "%r" ((USItype) (al)), \
             "rI" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1, %4, %5
    sbc %0, %2, %3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "r" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "r" ((USItype) (al)), \
             "rI" ((USItype) (bl)))
#define umul_ppmm(xh, xl, a, b) \
{register USItype __t0, __t1, __t2; \
  __asm__ ("%@ Inlined umul_ppmm
    mov %2, %5, lsr #16
    mov %0, %6, lsr #16
    bic %3, %5, %2, lsl #16
    bic %4, %6, %0, lsl #16
    mul %1, %3, %4
    mul %4, %2, %4
    mul %3, %0, %3
    mul %0, %2, %0
    adds %3, %4, %3
    addcs %0, %0, #65536
    adds %1, %1, %3, lsl #16
    adc %0, %0, %3, lsr #16" \
           : "=&r" ((USItype) (xh)), \
             "=r" ((USItype) (xl)), \
             "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
           : "r" ((USItype) (a)), \
             "r" ((USItype) (b)));}
#define UMUL_TIME 20
#define UDIV_TIME 100
#endif /* __arm__ */

#if defined (__clipper__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("mulwux %2,%0" \
           : "=r" (__xx.__ll) \
           : "%0" ((USItype) (u)), \
             "r" ((USItype) (v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define smul_ppmm(w1, w0, u, v) \
  ({union {DItype __ll; \
           struct {SItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("mulwx %2,%0" \
           : "=r" (__xx.__ll) \
           : "%0" ((SItype) (u)), \
             "r" ((SItype) (v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("mulwux %2,%0" \
             : "=r" (__w) \
             : "%0" ((USItype) (u)), \
               "r" ((USItype) (v))); \
    __w; })
#endif /* __clipper__ */

#if defined (__gmicro__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1
    addx %3,%0" \
           : "=g" ((USItype) (sh)), \
             "=&g" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1
    subx %3,%0" \
           : "=g" ((USItype) (sh)), \
             "=&g" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1" \
           : "=g" ((USItype) (ph)), \
             "=r" ((USItype) (pl)) \
           : "%0" ((USItype) (m0)), \
             "g" ((USItype) (m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1" \
           : "=g" ((USItype) (q)), \
             "=r" ((USItype) (r)) \
           : "1" ((USItype) (nh)), \
             "0" ((USItype) (nl)), \
             "g" ((USItype) (d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0" \
           : "=g" (count) \
           : "g" ((USItype) (x)), \
             "0" ((USItype) 0))
#endif

#if defined (__hppa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %4,%5,%1
    addc %2,%3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%rM" ((USItype) (ah)), \
             "rM" ((USItype) (bh)), \
             "%rM" ((USItype) (al)), \
             "rM" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %4,%5,%1
    subb %2,%3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "rM" ((USItype) (ah)), \
             "rM" ((USItype) (bh)), \
             "rM" ((USItype) (al)), \
             "rM" ((USItype) (bl)))
#if defined (_PA_RISC1_1)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    union \
      { \
        UDItype __f; \
        struct {USItype __w1, __w0;} __w1w0; \
      } __t; \
    __asm__ ("xmpyu %1,%2,%0" \
             : "=x" (__t.__f) \
             : "x" ((USItype) (u)), \
               "x" ((USItype) (v))); \
    (w1) = __t.__w1w0.__w1; \
    (w0) = __t.__w1w0.__w0; \
  } while (0)
#define UMUL_TIME 8
#else
#define UMUL_TIME 30
#endif
#define UDIV_TIME 40
#define count_leading_zeros(count, x) \
  do { \
    USItype __tmp; \
    __asm__ ( \
      "ldi 1,%0
    extru,= %1,15,16,%%r0  ; Bits 31..16 zero?
    extru,tr %1,15,16,%1   ; No.  Shift down, skip add.
    ldo 16(%0),%0          ; Yes.  Perform add.
    extru,= %1,23,8,%%r0   ; Bits 15..8 zero?
    extru,tr %1,23,8,%1    ; No.  Shift down, skip add.
    ldo 8(%0),%0           ; Yes.  Perform add.
    extru,= %1,27,4,%%r0   ; Bits 7..4 zero?
    extru,tr %1,27,4,%1    ; No.  Shift down, skip add.
    ldo 4(%0),%0           ; Yes.  Perform add.
    extru,= %1,29,2,%%r0   ; Bits 3..2 zero?
    extru,tr %1,29,2,%1    ; No.  Shift down, skip add.
    ldo 2(%0),%0           ; Yes.  Perform add.
    extru %1,30,1,%1       ; Extract bit 1.
    sub %0,%1,%0           ; Subtract it.
    " : "=r" (count), "=r" (__tmp) : "1" (x)); \
  } while (0)
#endif

#if defined (__i386__) || defined (__i486__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1
    adcl %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1
    sbbl %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
           : "=a" ((USItype) (w0)), \
             "=d" ((USItype) (w1)) \
           : "%0" ((USItype) (u)), \
             "rm" ((USItype) (v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
           : "=a" ((USItype) (q)), \
             "=d" ((USItype) (r)) \
           : "0" ((USItype) (n0)), \
             "1" ((USItype) (n1)), \
             "rm" ((USItype) (d)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("bsrl %1,%0" \
             : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */

#if defined (__i860__)
#if 0
/* Make sure these patterns really improve the code before
   switching them on.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
        DItype __ll; \
        struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fiadd.dd %1,%2,%0" \
             : "=f" (__s.__ll) \
             : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    union \
      { \
        DItype __ll; \
        struct {USItype __l, __h;} __i; \
      } __a, __b, __s; \
    __a.__i.__l = (al); \
    __a.__i.__h = (ah); \
    __b.__i.__l = (bl); \
    __b.__i.__h = (bh); \
    __asm__ ("fisub.dd %1,%2,%0" \
             : "=f" (__s.__ll) \
             : "%f" (__a.__ll), "f" (__b.__ll)); \
    (sh) = __s.__i.__h; \
    (sl) = __s.__i.__l; \
  } while (0)
#endif
#endif /* __i860__ */

#if defined (__i960__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("emul %2,%1,%0" \
           : "=d" (__xx.__ll) \
           : "%dI" ((USItype) (u)), \
             "dI" ((USItype) (v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("emul %2,%1,%0" \
             : "=d" (__w) \
             : "%dI" ((USItype) (u)), \
               "dI" ((USItype) (v))); \
    __w; })
#endif /* __i960__ */

#if defined (__M32R__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  /* The cmp clears the condition bit.  */ \
  __asm__ ("cmp %0,%0
    addx %5,%1
    addx %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "r" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "r" ((USItype) (bl)) \
           : "cbit")
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  /* The cmp clears the condition bit.  */ \
  __asm__ ("cmp %0,%0
    subx %5,%1
    subx %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "r" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "r" ((USItype) (bl)) \
           : "cbit")
#endif /* __M32R__ */

#if defined (__mc68000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%.l %5,%1
    addx%.l %3,%0" \
           : "=d" ((USItype) (sh)), \
             "=&d" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "d" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%.l %5,%1
    subx%.l %3,%0" \
           : "=d" ((USItype) (sh)), \
             "=&d" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "d" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "g" ((USItype) (bl)))

/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
#if defined (__mc68020__) || defined(mc68020) \
    || defined(__mc68030__) || defined(mc68030) \
    || defined(__mc68040__) || defined(mc68040) \
    || defined(__mcpu32__) || defined(mcpu32) \
    || defined(__NeXT__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulu%.l %3,%1:%0" \
           : "=d" ((USItype) (w0)), \
             "=d" ((USItype) (w1)) \
           : "%0" ((USItype) (u)), \
             "dmi" ((USItype) (v)))
#define UMUL_TIME 45
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divu%.l %4,%1:%0" \
           : "=d" ((USItype) (q)), \
             "=d" ((USItype) (r)) \
           : "0" ((USItype) (n0)), \
             "1" ((USItype) (n1)), \
             "dmi" ((USItype) (d)))
#define UDIV_TIME 90
#define sdiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divs%.l %4,%1:%0" \
           : "=d" ((USItype) (q)), \
             "=d" ((USItype) (r)) \
           : "0" ((USItype) (n0)), \
             "1" ((USItype) (n1)), \
             "dmi" ((USItype) (d)))

#else /* not mc68020 */
#if !defined(__mcf5200__)
/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("| Inlined umul_ppmm
    move%.l %2,%/d0
    move%.l %3,%/d1
    move%.l %/d0,%/d2
    swap %/d0
    move%.l %/d1,%/d3
    swap %/d1
    move%.w %/d2,%/d4
    mulu %/d3,%/d4
    mulu %/d1,%/d2
    mulu %/d0,%/d3
    mulu %/d0,%/d1
    move%.l %/d4,%/d0
    eor%.w %/d0,%/d0
    swap %/d0
    add%.l %/d0,%/d2
    add%.l %/d3,%/d2
    jcc 1f
    add%.l %#65536,%/d1
1:  swap %/d2
    moveq %#0,%/d0
    move%.w %/d2,%/d0
    move%.w %/d4,%/d2
    move%.l %/d2,%1
    add%.l %/d1,%/d0
    move%.l %/d0,%0" \
           : "=g" ((USItype) (xh)), \
             "=g" ((USItype) (xl)) \
           : "g" ((USItype) (a)), \
             "g" ((USItype) (b)) \
           : "d0", "d1", "d2", "d3", "d4")
#define UMUL_TIME 100
#define UDIV_TIME 400
#endif /* not mcf5200 */
#endif /* not mc68020 */

/* The '020, '030, '040 and '060 have bitfield insns.  */
#if defined (__mc68020__) || defined(mc68020) \
    || defined(__mc68030__) || defined(mc68030) \
    || defined(__mc68040__) || defined(mc68040) \
    || defined(__mc68060__) || defined(mc68060) \
    || defined(__NeXT__)
#define count_leading_zeros(count, x) \
  __asm__ ("bfffo %1{%b2:%b2},%0" \
           : "=d" ((USItype) (count)) \
           : "od" ((USItype) (x)), "n" (0))
#endif
#endif /* mc68000 */

#if defined (__m88000__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addu.co %1,%r4,%r5
    addu.ci %0,%r2,%r3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%rJ" ((USItype) (ah)), \
             "rJ" ((USItype) (bh)), \
             "%rJ" ((USItype) (al)), \
             "rJ" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subu.co %1,%r4,%r5
    subu.ci %0,%r2,%r3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "rJ" ((USItype) (ah)), \
             "rJ" ((USItype) (bh)), \
             "rJ" ((USItype) (al)), \
             "rJ" ((USItype) (bl)))
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("ff1 %0,%1" \
             : "=r" (__cbtmp) \
             : "r" ((USItype) (x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#if defined (__mc88110__)
#define umul_ppmm(wh, wl, u, v) \
  do { \
    union {UDItype __ll; \
           struct {USItype __h, __l;} __i; \
          } __xx; \
    __asm__ ("mulu.d %0,%1,%2" \
             : "=r" (__xx.__ll) \
             : "r" ((USItype) (u)), \
               "r" ((USItype) (v))); \
    (wh) = __xx.__i.__h; \
    (wl) = __xx.__i.__l; \
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
           struct {USItype __h, __l;} __i; \
          } __xx; \
  USItype __q; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("divu.d %0,%1,%2" \
           : "=r" (__q) \
           : "r" (__xx.__ll), \
             "r" ((USItype) (d))); \
  (r) = (n0) - __q * (d); (q) = __q; })
#define UMUL_TIME 5
#define UDIV_TIME 25
#else
#define UMUL_TIME 17
#define UDIV_TIME 150
#endif /* __mc88110__ */
#endif /* __m88000__ */

#if defined (__mips__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3" \
           : "=l" ((USItype) (w0)), \
             "=h" ((USItype) (w1)) \
           : "d" ((USItype) (u)), \
             "d" ((USItype) (v)))
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */

#if defined (__ns32000__)
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __asm__ ("meid %2,%0" \
           : "=g" (__xx.__ll) \
           : "%0" ((USItype) (u)), \
             "g" ((USItype) (v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w; \
    __asm__ ("meid %2,%0" \
             : "=g" (__w) \
             : "%0" ((USItype) (u)), \
               "g" ((USItype) (v))); \
    __w; })
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
           struct {USItype __l, __h;} __i; \
          } __xx; \
  __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
  __asm__ ("deid %2,%0" \
           : "=g" (__xx.__ll) \
           : "0" (__xx.__ll), \
             "g" ((USItype) (d))); \
  (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
#endif /* __ns32000__ */

#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "%r" ((USItype) (ah)), \
                 "%r" ((USItype) (al)), \
                 "rI" ((USItype) (bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "%r" ((USItype) (ah)), \
                 "%r" ((USItype) (al)), \
                 "rI" ((USItype) (bl))); \
    else \
      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "%r" ((USItype) (ah)), \
                 "r" ((USItype) (bh)), \
                 "%r" ((USItype) (al)), \
                 "rI" ((USItype) (bl))); \
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (ah) && (ah) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "r" ((USItype) (bh)), \
                 "rI" ((USItype) (al)), \
                 "r" ((USItype) (bl))); \
    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "r" ((USItype) (bh)), \
                 "rI" ((USItype) (al)), \
                 "r" ((USItype) (bl))); \
    else if (__builtin_constant_p (bh) && (bh) == 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "r" ((USItype) (ah)), \
                 "rI" ((USItype) (al)), \
                 "r" ((USItype) (bl))); \
    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "r" ((USItype) (ah)), \
                 "rI" ((USItype) (al)), \
                 "r" ((USItype) (bl))); \
    else \
      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
               : "=r" ((USItype) (sh)), \
                 "=&r" ((USItype) (sl)) \
               : "r" ((USItype) (ah)), \
                 "r" ((USItype) (bh)), \
                 "rI" ((USItype) (al)), \
                 "r" ((USItype) (bl))); \
  } while (0)
#define count_leading_zeros(count, x) \
  __asm__ ("{cntlz|cntlzw} %0,%1" \
           : "=r" ((USItype) (count)) \
           : "r" ((USItype) (x)))
#if defined (_ARCH_PPC)
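/* Note (added for exposition): mulhwu computes only the high 32 bits of
   the unsigned 64-bit product; the low 32 bits are exactly the ordinary
   truncated C multiplication, so no second asm statement is needed.  */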
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhwu %0,%1,%2" \
             : "=r" ((USItype) ph) \
             : "%r" (__m0), \
               "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define UMUL_TIME 15
#define smul_ppmm(ph, pl, m0, m1) \
  do { \
    SItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhw %0,%1,%2" \
             : "=r" ((SItype) ph) \
             : "%r" (__m0), \
               "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
#define SMUL_TIME 14
#define UDIV_TIME 120
#else
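/* Note (added for exposition): the POWER mul instruction forms a signed
   64-bit product.  The high word of the unsigned product differs from
   the signed one by m1 when m0 has its sign bit set, and by m0 when m1
   does; the two masked terms added to xh below apply exactly that
   correction ((SItype) x >> 31 is an all-ones mask iff the msb of x is
   set).  */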
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mul %0,%2,%3" \
             : "=r" ((USItype) (xh)), \
               "=q" ((USItype) (xl)) \
             : "r" (__m0), \
               "r" (__m1)); \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 8
#define smul_ppmm(xh, xl, m0, m1) \
  __asm__ ("mul %0,%2,%3" \
           : "=r" ((SItype) (xh)), \
             "=q" ((SItype) (xl)) \
           : "r" (m0), \
             "r" (m1))
#define SMUL_TIME 4
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4" \
           : "=r" ((SItype) (q)), "=q" ((SItype) (r)) \
           : "r" ((SItype) (nh)), "1" ((SItype) (nl)), "r" ((SItype) (d)))
#define UDIV_TIME 100
#endif
#endif /* Power architecture variants.  */

#if defined (__pyr__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addw %5,%1
    addwc %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subw %5,%1
    subwb %3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP.  */
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll; \
           struct {USItype __h, __l;} __i; \
          } __xx; \
  __asm__ ("movw %1,%R0
    uemul %2,%0" \
           : "=&r" (__xx.__ll) \
           : "g" ((USItype) (u)), \
             "g" ((USItype) (v))); \
  (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#endif /* __pyr__ */

#if defined (__ibm032__) /* RT/ROMP */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("a %1,%5
    ae %0,%3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "r" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "r" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("s %1,%5
    se %0,%3" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "r" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "r" ((USItype) (bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ( \
      "s r2,r2
    mts r10,%2
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    m r2,%3
    cas %0,r2,r0
    mfs r10,%1" \
             : "=r" ((USItype) (ph)), \
               "=r" ((USItype) (pl)) \
             : "%r" (__m0), \
               "r" (__m1) \
             : "r2"); \
    (ph) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
               : "=r" ((USItype) (count)) \
               : "r" ((USItype) (x) >> 16)); \
    else \
      { \
        __asm__ ("clz %0,%1" \
                 : "=r" ((USItype) (count)) \
                 : "r" ((USItype) (x))); \
        (count) += 16; \
      } \
  } while (0)
#endif

#if defined (__sparc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %r4,%5,%1
    addx %r2,%3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "%rJ" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "%rJ" ((USItype) (al)), \
             "rI" ((USItype) (bl)) \
           __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %r4,%5,%1
    subx %r2,%3,%0" \
           : "=r" ((USItype) (sh)), \
             "=&r" ((USItype) (sl)) \
           : "rJ" ((USItype) (ah)), \
             "rI" ((USItype) (bh)), \
             "rJ" ((USItype) (al)), \
             "rI" ((USItype) (bl)) \
           __CLOBBER_CC)
#if defined (__sparc_v8__)
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
           : "=r" ((USItype) (w1)), \
             "=r" ((USItype) (w0)) \
           : "r" ((USItype) (u)), \
             "r" ((USItype) (v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
           : "=&r" ((USItype) (q)), \
             "=&r" ((USItype) (r)) \
           : "r" ((USItype) (n1)), \
             "r" ((USItype) (n0)), \
             "r" ((USItype) (d)))
#else
#if defined (__sparclite__)
/* This has hardware multiply but not divide.  It also has two additional
   instructions scan (ffs from high bit) and divscc.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" \
           : "=r" ((USItype) (w1)), \
             "=r" ((USItype) (w0)) \
           : "r" ((USItype) (u)), \
             "r" ((USItype) (v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
    wr %%g0,%2,%%y ! Not a delayed write for sparclite
    tst %%g0
    divscc %3,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%%g1
    divscc %%g1,%4,%0
    rd %%y,%1
    bl,a 1f
    add %1,%4,%1
1:  ! End of inline udiv_qrnnd" \
           : "=r" ((USItype) (q)), \
             "=r" ((USItype) (r)) \
           : "r" ((USItype) (n1)), \
             "r" ((USItype) (n0)), \
             "rI" ((USItype) (d)) \
           : "g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37
#define count_leading_zeros(count, x) \
  do { \
  __asm__ ("scan %1,1,%0" \
           : "=r" ((USItype) (count)) \
           : "r" ((USItype) (x))); \
  } while (0)
#else
/* SPARC without integer multiplication and divide instructions.
   (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm
    wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
    sra %3,31,%%g2 ! Don't move this insn
    and %2,%%g2,%%g2 ! Don't move this insn
    andcc %%g0,0,%%g1 ! Don't move this insn
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,%3,%%g1
    mulscc %%g1,0,%%g1
    add %%g1,%%g2,%0
    rd %%y,%1" \
           : "=r" ((USItype) (w1)), \
             "=r" ((USItype) (w0)) \
           : "%rI" ((USItype) (u)), \
             "r" ((USItype) (v)) \
           : "g1", "g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39 /* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd
    mov 32,%%g1
    subcc %1,%2,%%g0
1:  bcs 5f
    addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
    sub %1,%2,%1 ! this kills msb of n
    addx %1,%1,%1 ! so this can't give carry
    subcc %%g1,1,%%g1
2:  bne 1b
    subcc %1,%2,%%g0
    bcs 3f
    addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
    b 3f
    sub %1,%2,%1 ! this kills msb of n
4:  sub %1,%2,%1
5:  addxcc %1,%1,%1
    bcc 2b
    subcc %%g1,1,%%g1
! Got carry from n.  Subtract next step to cancel this carry.
    bne 4b
    addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
    sub %1,%2,%1
3:  xnor %0,0,%0
    ! End of inline udiv_qrnnd" \
           : "=&r" ((USItype) (q)), \
             "=&r" ((USItype) (r)) \
           : "r" ((USItype) (d)), \
             "1" ((USItype) (n1)), \
             "0" ((USItype) (n0)) : "g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
#endif /* __sparc__ */

#if defined (__vax__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1
    adwc %3,%0" \
           : "=g" ((USItype) (sh)), \
             "=&g" ((USItype) (sl)) \
           : "%0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "%1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1
    sbwc %3,%0" \
           : "=g" ((USItype) (sh)), \
             "=&g" ((USItype) (sl)) \
           : "0" ((USItype) (ah)), \
             "g" ((USItype) (bh)), \
             "1" ((USItype) (al)), \
             "g" ((USItype) (bl)))
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union { \
      UDItype __ll; \
      struct {USItype __l, __h;} __i; \
    } __xx; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
             : "=r" (__xx.__ll) \
             : "g" (__m0), \
               "g" (__m1)); \
    (xh) = __xx.__i.__h; \
    (xl) = __xx.__i.__l; \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
             + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
#define sdiv_qrnnd(q, r, n1, n0, d) \
  do { \
    union {DItype __ll; \
           struct {SItype __l, __h;} __i; \
          } __xx; \
    __xx.__i.__h = n1; __xx.__i.__l = n0; \
    __asm__ ("ediv %3,%2,%0,%1" \
             : "=g" (q), "=g" (r) \
             : "g" (__xx.__ll), "g" (d)); \
  } while (0)
#endif /* __vax__ */

#endif /* __GNUC__ */

/* If this machine has no inline assembler, use C macros.  */

#if !defined (add_ssaaaa)
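/* Note (added for exposition): in the fallback below the low words are
   added with ordinary modular arithmetic; a carry out of the low word
   occurred exactly when the truncated sum __x is smaller than one of
   the addends, hence the (__x < (al)) term folded into the high word.  */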
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (sub_ddmmss)
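/* Note (added for exposition): symmetrically, a borrow out of the low
   word occurred exactly when the truncated difference __x exceeds the
   minuend, hence the (__x > (al)) term subtracted from the high word.  */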
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    USItype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif

#if !defined (umul_ppmm)
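/* Note (added for exposition): this is schoolbook multiplication on
   half-word "digits".  Writing u = __uh*B + __ul and v = __vh*B + __vl
   with B = __ll_B, the product is
   __x3*B^2 + (__x1 + __x2)*B + __x0; the middle sum can carry into the
   B^2 position, which is what the (__x1 < __x2) test catches.  */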
#define umul_ppmm(w1, w0, u, v) \
  do { \
    USItype __x0, __x1, __x2, __x3; \
    USItype __ul, __vl, __uh, __vh; \
    \
    __ul = __ll_lowpart (u); \
    __uh = __ll_highpart (u); \
    __vl = __ll_lowpart (v); \
    __vh = __ll_highpart (v); \
    \
    __x0 = (USItype) __ul * __vl; \
    __x1 = (USItype) __ul * __vh; \
    __x2 = (USItype) __uh * __vl; \
    __x3 = (USItype) __uh * __vh; \
    \
    __x1 += __ll_highpart (__x0);/* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += __ll_B; /* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
  } while (0)
#endif

#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({DIunion __w; \
    umul_ppmm (__w.s.high, __w.s.low, u, v); \
    __w.ll; })
#endif

/* Define this unconditionally, so it can be used for debugging.  */
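/* Note (added for exposition): __udiv_qrnnd_c is classical two-digit
   schoolbook division with half-words as digits: each half of the
   quotient is estimated from a one-digit divide by __d1 and then
   corrected, at most twice per half.  The estimate is only guaranteed
   to be close enough when the divisor is normalized (msb set), which
   is why UDIV_NEEDS_NORMALIZATION is defined to 1 below when this
   fallback is selected.  */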
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    USItype __d1, __d0, __q1, __q0; \
    USItype __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
    \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (USItype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
        __q1--, __r1 += (d); \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
          if (__r1 < __m) \
            __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
    \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (USItype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
        __q0--, __r0 += (d); \
        if (__r0 >= (d)) \
          if (__r0 < __m) \
            __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
    \
    (q) = (USItype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)

/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere).  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    USItype __r; \
    (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif

/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif

#if !defined (count_leading_zeros)
extern const UQItype __clz_tab[];
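/* Note (added for exposition): __clz_tab, defined in libgcc, maps each
   byte value 1..255 to the position of its highest set bit plus one.
   The macro below first locates the highest nonzero chunk of X at bit
   offset __a, then uses the table on that chunk, so
   __clz_tab[__xr >> __a] + __a is one more than the index of X's
   highest set bit.  */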
#define count_leading_zeros(count, x) \
  do { \
    USItype __xr = (x); \
    USItype __a; \
    \
    if (SI_TYPE_SIZE <= 32) \
      { \
        __a = __xr < ((USItype)1<<2*__BITS4) \
          ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \
          : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
        for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
          if (((__xr >> __a) & 0xff) != 0) \
            break; \
      } \
    \
    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
#endif

#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif