1 #include <linux/kernel.h>
2 #include <linux/sched.h>
3 #include <linux/types.h>
4 #include <asm/byteorder.h>
/*
 * add_ssaaaa(sh, sl, ah, al, bh, bl):
 * Two-word (64-bit) addition: (sh:sl) = (ah:al) + (bh:bl), implemented
 * with the SPARC carry chain — "addcc" adds the low words and sets the
 * carry flag; an "addx" (add with carry) for the high words follows in
 * the original source.
 *
 * NOTE(review): this listing is truncated — the embedded original line
 * numbers jump from 7 to 9 and from 14 to 16, so the high-word addx
 * instruction, the closing of the asm template string, and the clobber
 * list ("cc") are missing here. Do not edit this macro without the
 * complete original (GMP longlong.h / Linux sparc math-emu).
 */
6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
7 __asm__ ("addcc %r4,%5,%1
9 : "=r" ((USItype)(sh)), \
10 "=&r" ((USItype)(sl)) \
11 : "%rJ" ((USItype)(ah)), \
12 "rI" ((USItype)(bh)), \
13 "%rJ" ((USItype)(al)), \
14 "rI" ((USItype)(bl)) \
/*
 * sub_ddmmss(sh, sl, ah, al, bh, bl):
 * Two-word (64-bit) subtraction: (sh:sl) = (ah:al) - (bh:bl). "subcc"
 * subtracts the low words and sets the borrow (carry) flag; a "subx"
 * (subtract with borrow) for the high words follows in the original.
 * Note the constraints are not commutative here ("rJ", not "%rJ"),
 * since subtraction is not.
 *
 * NOTE(review): truncated listing — the embedded line numbers jump from
 * 17 to 19, so the subx instruction, the asm string terminator, and the
 * clobber list are not visible. Consult the full original before editing.
 */
16 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
17 __asm__ ("subcc %r4,%5,%1
19 : "=r" ((USItype)(sh)), \
20 "=&r" ((USItype)(sl)) \
21 : "rJ" ((USItype)(ah)), \
22 "rI" ((USItype)(bh)), \
23 "rJ" ((USItype)(al)), \
24 "rI" ((USItype)(bl)) \
/*
 * umul_ppmm(w1, w0, u, v):
 * 32x32 -> 64 unsigned multiply, (w1:w0) = u * v, for SPARC v7 which
 * has no hardware multiply instruction. Uses the multiply-step
 * instruction (mulscc) driven through the %y register; the sign-mask
 * setup visible below (sra/and into %g2) corrects the signed mulscc
 * result to an unsigned product afterwards. The "Don't move this insn"
 * comments are load-bearing: wr %y has a delayed effect, so instruction
 * order must not be disturbed.
 *
 * NOTE(review): severely truncated listing — the embedded line numbers
 * jump from 32 to 68, dropping the entire ~32-step mulscc chain, the
 * unsigned correction, the asm string terminator, and the tail of the
 * constraint/clobber list. Recover the full macro from GMP longlong.h
 * (SPARC) before any modification.
 */
27 #define umul_ppmm(w1, w0, u, v) \
28 __asm__ ("! Inlined umul_ppmm
29 wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
30 sra %3,31,%%g2 ! Don't move this insn
31 and %2,%%g2,%%g2 ! Don't move this insn
32 andcc %%g0,0,%%g1 ! Don't move this insn
68 : "=r" ((USItype)(w1)), \
69 "=r" ((USItype)(w0)) \
70 : "%rI" ((USItype)(u)), \
74 /* It's quite necessary to add this much assembler for the sparc.
75 The default udiv_qrnnd (in C) is more than 10 times slower! */
/*
 * udiv_qrnnd(q, r, n1, n0, d):
 * 64-by-32 unsigned division: divides the two-word value (n1:n0) by d,
 * producing quotient q and remainder r. Implemented as a shift-and-
 * subtract restoring division loop — each addxcc/sub/addx group below
 * is one divide step that shifts a quotient bit into the low end of %0.
 * Constraints tie n1 to the r output register ("1") and n0 to the q
 * output register ("0"); %g1 and the condition codes are clobbered.
 *
 * NOTE(review): truncated listing — the embedded line numbers skip
 * 78-80, 84-87, 89, 91-94, 96, and 98-99, so most of the unrolled
 * divide-step iterations and the branch structure around the
 * carry-cancel path are missing. The constraint/clobber tail
 * (lines 101-105) appears intact. Do not edit without the complete
 * original.
 */
76 #define udiv_qrnnd(q, r, n1, n0, d) \
77 __asm__ ("! Inlined udiv_qrnnd
81 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
82 sub %1,%2,%1 ! this kills msb of n
83 addx %1,%1,%1 ! so this can't give carry
88 addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
90 sub %1,%2,%1 ! this kills msb of n
95 ! Got carry from n. Subtract next step to cancel this carry.
97 addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
100 ! End of inline udiv_qrnnd" \
101 : "=&r" ((USItype)(q)), \
102 "=&r" ((USItype)(r)) \
103 : "r" ((USItype)(d)), \
104 "1" ((USItype)(n1)), \
105 "0" ((USItype)(n0)) : "%g1", "cc")
/*
 * The udiv_qrnnd above handles an arbitrary (non-zero) divisor, so the
 * generic sfp/longlong code need not pre-shift d so its msb is set.
 */
106 #define UDIV_NEEDS_NORMALIZATION 0
/*
 * Byte-order selection for the generic soft-fp code.
 *
 * NOTE(review): truncated listing — the embedded line numbers skip
 * 107-111 and 113, which presumably held the #if/#else (and later
 * #endif) testing __BIG_ENDIAN / __LITTLE_ENDIAN from
 * <asm/byteorder.h>. As shown, both defines would conflict; the
 * conditional structure must be restored from the full original.
 * (SPARC is big-endian, so the first branch is the live one there.)
 */
112 #define __BYTE_ORDER __BIG_ENDIAN
114 #define __BYTE_ORDER __LITTLE_ENDIAN