/* include/linux/math64.h */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* do_div() divides its first argument in place and evaluates to the remainder. */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
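/*
 * Illustrative sketch, not part of the original header: div_u64_rem() is the
 * usual way to split a 64-bit value by a 32-bit divisor while keeping the
 * remainder. The helper name and the hard-coded nanoseconds-per-second value
 * below are assumptions added purely for illustration.
 */
static inline u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
	/* Returns whole seconds; *rem_ns receives the leftover nanoseconds. */
	return div_u64_rem(ns, 1000000000U, rem_ns);
}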
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
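/*
 * Illustrative sketch, not part of the original header: when the remainder is
 * not needed, div_u64() is preferred over a plain u64 '/'; on 32-bit kernels
 * a native 64-bit division would typically be emitted as a libgcc helper call
 * (e.g. __udivdi3) that the kernel does not link against. The helper below is
 * hypothetical.
 */
static inline u64 example_average_bytes(u64 total_bytes, u32 nr_samples)
{
	/* Assumes nr_samples is nonzero; remainder is discarded. */
	return div_u64(total_bytes, nr_samples);
}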
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif
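/*
 * Illustrative sketch, not part of the original header: div_s64() follows C's
 * truncation-toward-zero semantics, so negative dividends round toward zero
 * (e.g. -7 / 2 == -3, remainder -1). The helper below is hypothetical.
 */
static inline s64 example_delta_ns_to_ms(s64 delta_ns)
{
	/* 1e6 nanoseconds per millisecond; the result keeps the sign of delta_ns. */
	return div_s64(delta_ns, 1000000);
}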
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation.  */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
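/*
 * Illustrative sketch, not part of the original header: __iter_div_u64_rem()
 * divides by repeated subtraction, so it is only sensible when the quotient is
 * expected to be very small (a handful of loop iterations). The helper name
 * and the constant below are assumptions for illustration only.
 */
static inline u32 example_split_short_interval(u64 ns, u64 *rem_ns)
{
	/* Caller expects at most a few whole seconds, keeping the loop cheap. */
	return __iter_div_u64_rem(ns, 1000000000U, rem_ns);
}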
#endif /* _LINUX_MATH64_H */