/*
 * Import 2.3.4pre3
 * [davej-history.git] / arch/sparc/math-emu/sfp-util.h
 * blob 75ec691244a286ac31a4883027bafd6a12c19d5b
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/byteorder.h>
/* 64-bit add from 32-bit halves: (sh:sl) = (ah:al) + (bh:bl).
 * addcc produces the low word and sets the carry; addx folds that carry
 * into the high-word add.  Constraint notes: "%" marks a commutative
 * operand pair, "r" a register, "J" immediate zero, "I" a signed 13-bit
 * immediate; sl is earlyclobbered ("=&r") because it is written before
 * the high-word inputs are read.  Both insns set the condition codes,
 * hence the "cc" clobber.
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) 				\
	__asm__ ("addcc %r4,%5,%1\n\t"					\
		 "addx %r2,%3,%0\n"					\
		 : "=r" (sh),						\
		   "=&r" (sl)						\
		 : "%rJ" ((USItype)(ah)),				\
		   "rI" ((USItype)(bh)),				\
		   "%rJ" ((USItype)(al)),				\
		   "rI" ((USItype)(bl))					\
		 : "cc")
/* 64-bit subtract from 32-bit halves: (sh:sl) = (ah:al) - (bh:bl).
 * subcc produces the low word and sets the borrow; subx folds that
 * borrow into the high-word subtract.  Unlike add_ssaaaa there is no
 * "%" (subtraction does not commute).  sl is earlyclobbered since it is
 * written before the high-word inputs are consumed; "cc" is clobbered
 * because both insns write the condition codes.
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
	__asm__ ("subcc %r4,%5,%1\n\t"					\
		 "subx %r2,%3,%0\n"					\
		 : "=r" (sh),						\
		   "=&r" (sl)						\
		 : "rJ" ((USItype)(ah)),				\
		   "rI" ((USItype)(bh)),				\
		   "rJ" ((USItype)(al)),				\
		   "rI" ((USItype)(bl))					\
		 : "cc")
/* 32x32 -> 64-bit unsigned multiply: (w1:w0) = u * v, for SPARC v7 parts
 * without a hardware umul.  The mulscc sequence performs a signed
 * multiply-step 32 times (one partial-product bit per step), so the
 * sra/and prologue precomputes a correction (%g2 = (u >> 31) ? u & v-sign
 * fixup : 0 — the usual signed->unsigned product adjustment) that is
 * added to the high word at the end.  The low word accumulates in the
 * %y register and is read back with rd.  The "Don't move" comments are
 * load-bearing: wr %y has a 0-3 instruction result delay, and the step
 * sequence depends on the flags set up by andcc.  %g1/%g2 are scratch.
 */
#define umul_ppmm(w1, w0, u, v) \
	__asm__ ("! Inlined umul_ppmm\n\t"				\
		 "wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n\t" \
		 "sra	%3,31,%%g2	! Don't move this insn\n\t"	\
		 "and	%2,%%g2,%%g2	! Don't move this insn\n\t"	\
		 "andcc	%%g0,0,%%g1	! Don't move this insn\n\t"	\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,%3,%%g1\n\t"			\
		 "mulscc	%%g1,0,%%g1\n\t"			\
		 "add	%%g1,%%g2,%0\n\t"				\
		 "rd	%%y,%1\n"					\
		 : "=r" (w1),						\
		   "=r" (w0)						\
		 : "%rI" ((USItype)(u)),				\
		   "r" ((USItype)(v))					\
		 : "%g1", "%g2", "cc")
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */

/* 64/32 -> 32-bit unsigned divide: q = (n1:n0) / d, r = (n1:n0) % d.
 * Classic shift-and-subtract restoring division, 32 iterations counted
 * down in %g1; quotient bits are shifted into %0 via addxcc, and the
 * final xnor complements the accumulated (inverted) quotient bits.
 * The branch/delay-slot interleaving is deliberate — every branch's
 * delay slot does useful work — so the instruction order must not be
 * disturbed.  Matching constraints "1"/"0" tie n1/n0 to the r/q output
 * registers as initial values; both outputs are earlyclobbered.
 * NOTE(review): the original comments state that (n1:n0) must already
 * satisfy n1 < d (no overflow); callers normalize accordingly.
 */
#define udiv_qrnnd(q, r, n1, n0, d) \
	__asm__ ("! Inlined udiv_qrnnd\n\t"				\
		 "mov	32,%%g1\n\t"					\
		 "subcc	%1,%2,%%g0\n\t"					\
		 "1:	bcs	5f\n\t"					\
		 "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t" \
		 "sub	%1,%2,%1	! this kills msb of n\n\t"	\
		 "addx	%1,%1,%1	! so this can't give carry\n\t"	\
		 "subcc	%%g1,1,%%g1\n\t"				\
		 "2:	bne	1b\n\t"					\
		 "subcc	%1,%2,%%g0\n\t"					\
		 "bcs	3f\n\t"						\
		 "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t" \
		 "b	3f\n\t"						\
		 "sub	%1,%2,%1	! this kills msb of n\n\t"	\
		 "4:	sub	%1,%2,%1\n\t"				\
		 "5:	addxcc	%1,%1,%1\n\t"				\
		 "bcc	2b\n\t"						\
		 "subcc	%%g1,1,%%g1\n\t"				\
		 "! Got carry from n. Subtract next step to cancel this carry.\n\t" \
		 "bne	4b\n\t"						\
		 "addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n\t" \
		 "sub	%1,%2,%1\n\t"					\
		 "3:	xnor	%0,0,%0\n\t"				\
		 "! End of inline udiv_qrnnd\n"				\
		 : "=&r" (q),						\
		   "=&r" (r)						\
		 : "r" ((USItype)(d)),					\
		   "1" ((USItype)(n1)),					\
		   "0" ((USItype)(n0)) : "%g1", "cc")
106 #define UDIV_NEEDS_NORMALIZATION 0
/* The generic math-emu code calls abort() on "can't happen" paths.
 * There is no abort() in the kernel, so bail out of the enclosing
 * function instead.  NOTE(review): this assumes every function that
 * expands abort() returns int — true for the math-emu entry points.
 */
#define abort() \
	return 0
/* Map the kernel's endian macro (from <asm/byteorder.h>, included above)
 * onto the __BYTE_ORDER name that the generic soft-fp headers test.
 */
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif