apps/codecs/libwma/wmafixed.h
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* fixed precision code.  We use a combination of Sign 15.16 and Sign.31
   precision here.

   The WMA decoder does not always follow this convention, and occasionally
   renormalizes values to other formats in order to maximize precision.
   However, only the two precisions above are provided in this file.
*/
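
/* Concretely: in Sign 15.16, 1.0 is 0x00010000 and -0.5 is 0xFFFF8000;
   in Sign.31, 0.5 is 0x40000000 and -1.0 is 0x80000000. */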
#include "types.h"

#define PRECISION       16
#define PRECISION64     16

#define fixtof64(x)     (float)((float)(x) / (float)(1 << PRECISION64)) //does not work on int64_t!
#define ftofix32(x)     ((fixed32)((x) * (float)(1 << PRECISION) + ((x) < 0 ? -0.5 : 0.5)))
#define itofix64(x)     (IntTo64(x))
#define itofix32(x)     ((x) << PRECISION)
#define fixtoi32(x)     ((x) >> PRECISION)
#define fixtoi64(x)     (IntFrom64(x))
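
/* Worked examples with PRECISION == 16:
     itofix32(3)          == 0x00030000
     fixtoi32(0x00018000) == 1            (1.5; >> rounds toward minus infinity)
     ftofix32(0.25f)      == 0x00004000
     fixmul32(0x00008000, 0x00020000) == 0x00010000   (0.5 * 2.0 == 1.0) */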

/*fixed functions*/

fixed64 IntTo64(int x);
int IntFrom64(fixed64 x);
fixed32 Fixed32From64(fixed64 x);
fixed64 Fixed32To64(fixed32 x);
fixed64 fixmul64byfixed(fixed64 x, fixed32 y);
fixed32 fixdiv32(fixed32 x, fixed32 y);
fixed64 fixdiv64(fixed64 x, fixed64 y);
fixed32 fixsqrt32(fixed32 x);
long fsincos(unsigned long phase, fixed32 *cos);

#ifdef CPU_ARM

/*Sign-15.16 format */
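/* 32x32->64 multiply via smull; the movs shifts the low word right by
   PRECISION and leaves the last bit shifted out in the carry flag, so the
   adc merges in the high word (shifted left by 32-PRECISION) while also
   rounding the result to nearest. */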
#define fixmul32(x, y)  \
    ({ int32_t __hi;  \
       uint32_t __lo;  \
       int32_t __result;  \
       asm ("smull %0, %1, %3, %4\n\t"  \
            "movs  %0, %0, lsr %5\n\t"  \
            "adc   %2, %0, %1, lsl %6"  \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result)  \
            : "%r" (x), "r" (y),  \
              "M" (PRECISION), "M" (32 - PRECISION)  \
            : "cc");  \
       __result;  \
    })
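
/* Sign.31 multiply: the high word of the 64-bit product is the s.31 result
   scaled down by one bit, so shifting it left by 1 restores the format (the
   low word, and with it the result's lowest bit, is discarded). */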
#define fixmul32b(x, y)  \
    ({ int32_t __hi;  \
       uint32_t __lo;  \
       int32_t __result;  \
       asm ("smull %0, %1, %3, %4\n\t"  \
            "movs  %2, %1, lsl #1"  \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result)  \
            : "%r" (x), "r" (y)  \
            : "cc");  \
       __result;  \
    })

#elif defined(CPU_COLDFIRE)
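
/* These assume the EMAC unit is in fractional mode (which the codec setup
   code is expected to have selected), so reading an accumulator with
   movclr.l yields (x*y)>>31.  fixmul32 then rebuilds the s15.16 product
   (x*y)>>16 by splicing the upper bits from the accumulator together with
   the lower bits from a plain mulu.l via move.w/swap. */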
static inline int32_t fixmul32(int32_t x, int32_t y)
{
#if PRECISION != 16
#warning Coldfire fixmul32() only works for PRECISION == 16
#endif
    int32_t t1;
    asm (
        "mac.l    %[x], %[y], %%acc0  \n"  /* multiply */
        "mulu.l   %[y], %[x]          \n"  /* get lower half, avoid emac stall */
        "movclr.l %%acc0, %[t1]       \n"  /* get higher half */
        "lsr.l    #1, %[t1]           \n"
        "move.w   %[t1], %[x]         \n"
        "swap     %[x]                \n"
        : [t1] "=&d" (t1), [x] "+d" (x)
        : [y] "d" (y)
    );
    return x;
}

static inline int32_t fixmul32b(int32_t x, int32_t y)
{
    asm (
        "mac.l    %[x], %[y], %%acc0  \n"  /* multiply */
        "movclr.l %%acc0, %[x]        \n"  /* get higher half */
        : [x] "+d" (x)
        : [y] "d" (y)
    );
    return x;
}

#else

static inline fixed32 fixmul32(fixed32 x, fixed32 y)
{
    fixed64 temp;

    temp = x;
    temp *= y;

    temp >>= PRECISION;

    return (fixed32)temp;
}

static inline fixed32 fixmul32b(fixed32 x, fixed32 y)
{
    fixed64 temp;

    temp = x;
    temp *= y;

    temp >>= 31;        //16 + 31 - 16 = 31 bits, i.e. s15.16 * s.31 -> s15.16

    return (fixed32)temp;
}

#endif
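
/* CMUL computes the complex product (a + ib)*(t + iv):
       real = a*t - b*v,   imag = b*t + a*v
   The real part is stored through the first pointer and the imaginary part
   through the second; the operands are treated as Sign.31 values. */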

#ifdef CPU_ARM
static inline
void CMUL(fixed32 *x, fixed32 *y,
          fixed32  a, fixed32  b,
          fixed32  t, fixed32  v)
{
    /* This version loses one bit of precision. Could be solved at the cost
     * of 2 extra cycles if it becomes an issue. */
    int x1, y1, l;
    asm(
        "smull    %[l], %[y1], %[b], %[t] \n"
        "smlal    %[l], %[y1], %[a], %[v] \n"
        "rsb      %[b], %[b], #0          \n"
        "smull    %[l], %[x1], %[a], %[t] \n"
        "smlal    %[l], %[x1], %[b], %[v] \n"
        : [l] "=&r" (l), [x1] "=&r" (x1), [y1] "=&r" (y1), [b] "+r" (b)
        : [a] "r" (a), [t] "r" (t), [v] "r" (v)
        : "cc"
    );
    *x = x1 << 1;
    *y = y1 << 1;
}
#elif defined CPU_COLDFIRE
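/* Both EMAC accumulators are used in parallel: acc0 collects the real part
   a*t - b*v (mac.l followed by msac.l), acc1 collects the imaginary part
   b*t + a*v, and both results are written out through the pointers. */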
static inline
void CMUL(fixed32 *x, fixed32 *y,
          fixed32  a, fixed32  b,
          fixed32  t, fixed32  v)
{
    asm volatile ("mac.l    %[a], %[t], %%acc0;"
                  "msac.l   %[b], %[v], %%acc0;"
                  "mac.l    %[b], %[t], %%acc1;"
                  "mac.l    %[a], %[v], %%acc1;"
                  "movclr.l %%acc0, %[a];"
                  "move.l   %[a], (%[x]);"
                  "movclr.l %%acc1, %[a];"
                  "move.l   %[a], (%[y]);"
                  : [a] "+&r" (a)
                  : [x] "a" (x), [y] "a" (y),
                    [b] "r" (b), [t] "r" (t), [v] "r" (v)
                  : "cc", "memory");
}
#else
static inline
void CMUL(fixed32 *pre,
          fixed32 *pim,
          fixed32  are,
          fixed32  aim,
          fixed32  bre,
          fixed32  bim)
{
    //int64_t x,y;
    fixed32 _aref = are;
    fixed32 _aimf = aim;
    fixed32 _bref = bre;
    fixed32 _bimf = bim;
    fixed32 _r1 = fixmul32b(_bref, _aref);
    fixed32 _r2 = fixmul32b(_bimf, _aimf);
    fixed32 _r3 = fixmul32b(_bref, _aimf);
    fixed32 _r4 = fixmul32b(_bimf, _aref);
    *pre = _r1 - _r2;
    *pim = _r3 + _r4;
}
#endif