1 /****************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
11 * All files in this archive are subject to the GNU General Public License.
12 * See the file COPYING in the source tree root for full license agreement.
14 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
15 * KIND, either express or implied.
17 ****************************************************************************/
/* fixed precision code. We use a combination of Sign 15.16 and Sign.31
   formats.

   The WMA decoder does not always follow this convention, and occasionally
   renormalizes values to other formats in order to maximize precision.
   However, only the two precisions above are provided in this file. */
/* Number of fractional bits in the fixed64 (Sign 47.16) format. */
#define PRECISION64 16

#ifndef PRECISION
/* NOTE(review): PRECISION is used by the macros below but was not defined
 * in this copy of the file. 16 matches the Sign 15.16 format and
 * PRECISION64 above — confirm against the original source. */
#define PRECISION 16
#endif

/* fixed64 -> float: divide by 2^PRECISION64. Does not work on int64_t! */
#define fixtof64(x) (float)((float)(x) / (float)(1 << PRECISION64))
/* float -> fixed32, rounded to nearest (the +/-0.5 term before truncation). */
#define ftofix32(x) ((fixed32)((x) * (float)(1 << PRECISION) + ((x) < 0 ? -0.5 : 0.5)))
/* int <-> fixed64 conversions are delegated to the helper functions below. */
#define itofix64(x) (IntTo64(x))
/* int -> fixed32: shift the integer into the 15.16 integer part. */
#define itofix32(x) ((x) << PRECISION)
/* fixed32 -> int: drop the fractional bits (arithmetic shift assumed). */
#define fixtoi32(x) ((x) >> PRECISION)
#define fixtoi64(x) (IntFrom64(x))
/* Prototypes for the fixed-point helper routines implemented elsewhere.
 * fixed32 is Sign 15.16 and fixed64 is Sign 47.16 — see the note at the
 * top of the file. */

/** Convert an integer to fixed64 (used by the itofix64() macro). */
fixed64 IntTo64(int x);

/** Convert a fixed64 back to an integer (used by the fixtoi64() macro). */
int IntFrom64(fixed64 x);

/** Convert a fixed64 value to fixed32. */
fixed32 Fixed32From64(fixed64 x);

/** Convert a fixed32 value to fixed64. */
fixed64 Fixed32To64(fixed32 x);

/** Multiply a fixed64 value by a fixed32 value. */
fixed64 fixmul64byfixed(fixed64 x, fixed32 y);

/** Fixed-point division x / y in fixed32. */
fixed32 fixdiv32(fixed32 x, fixed32 y);

/** Fixed-point division x / y in fixed64. */
fixed64 fixdiv64(fixed64 x, fixed64 y);

/** Fixed-point square root of x. */
fixed32 fixsqrt32(fixed32 x);

/** Fixed-point sine/cosine of the given phase: writes the cosine through
 * *cos and presumably returns the sine — confirm against the definition. */
long fsincos(unsigned long phase, fixed32 *cos);
/* Sign 15.16 format, ARM assembly version.
 * NOTE(review): this block was truncated in this copy — the statement-
 * expression wrapper, temporaries and clobber list are reconstructed;
 * the asm template and operand lists are as found. */

/* fixmul32: Sign 15.16 multiply, (x * y) >> PRECISION.
 * smull forms the full 64-bit product; lsr/lsl recombine the halves and
 * adc adds back the last bit shifted out, rounding the result. */
#define fixmul32(x, y) \
    ({ int32_t __hi;  \
       uint32_t __lo; \
       int32_t __result; \
       asm ("smull %0, %1, %3, %4\n\t" \
            "movs %0, %0, lsr %5\n\t" \
            "adc %2, %0, %1, lsl %6" \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
            : "%r" (x), "r" (y), \
              "M" (PRECISION), "M" (32 - PRECISION) \
            : "cc"); \
       __result; \
    })
/* fixmul32b: Sign.31 multiply, ARM assembly version.
 * Takes the high word of the 64-bit product shifted left by one, since a
 * .31 * .31 multiply leaves a .30 value in the high word.
 * NOTE(review): truncated in this copy — the statement-expression wrapper
 * and clobber list are reconstructed. */
#define fixmul32b(x, y) \
    ({ int32_t __hi;  \
       uint32_t __lo; \
       int32_t __result; \
       asm ("smull %0, %1, %3, %4\n\t" \
            "movs %2, %1, lsl #1" \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
            : "%r" (x), "r" (y) \
            : "cc"); \
       __result; \
    })
84 #elif defined(CPU_COLDFIRE)
/* ColdFire EMAC version of fixmul32(): Sign 15.16 multiply.
 * The EMAC accumulator supplies the high half of the 64-bit product
 * (mac.l) while a plain mulu.l produces the low half; the halves are then
 * recombined into a 15.16 result.
 * NOTE(review): this block is truncated in this copy — the opening brace,
 * the t1 declaration, part of the asm combine sequence (and any `#if
 * PRECISION != 16` guard around the #warning) and the return are missing.
 * Recover them from the original source before use. */
static inline int32_t fixmul32(int32_t x, int32_t y)
#warning Coldfire fixmul32() only works for PRECISION == 16
    "mac.l %[x], %[y], %%acc0 \n" /* multiply */
    "mulu.l %[y], %[x] \n" /* get lower half, avoid emac stall */
    "movclr.l %%acc0, %[t1] \n" /* get higher half */
    /* combine halves: move the high part's low word into x */
    "move.w %[t1], %[x] \n"
    : [t1] "=&d" (t1), [x] "+d" (x)
/* ColdFire EMAC version of fixmul32b(): Sign.31 multiply — only the high
 * part of the product (from %acc0) is kept.
 * NOTE(review): truncated in this copy — the opening brace, operand lists
 * and the return are missing; recover them from the original source. */
static inline int32_t fixmul32b(int32_t x, int32_t y)
    "mac.l %[x], %[y], %%acc0 \n" /* multiply */
    "movclr.l %%acc0, %[x] \n" /* get higher half */
118 static inline fixed32
fixmul32(fixed32 x
, fixed32 y
)
126 return (fixed32
)temp
;
129 static inline fixed32
fixmul32b(fixed32 x
, fixed32 y
)
136 temp
>>= 31; //16+31-16 = 31 bits
138 return (fixed32
)temp
;
145 void CMUL(fixed32
*x
, fixed32
*y
,
146 fixed32 a
, fixed32 b
,
147 fixed32 t
, fixed32 v
)
149 /* This version loses one bit of precision. Could be solved at the cost
150 * of 2 extra cycles if it becomes an issue. */
153 "smull %[l], %[y1], %[b], %[t] \n"
154 "smlal %[l], %[y1], %[a], %[v] \n"
155 "rsb %[b], %[b], #0 \n"
156 "smull %[l], %[x1], %[a], %[t] \n"
157 "smlal %[l], %[x1], %[b], %[v] \n"
158 : [l
] "=&r" (l
), [x1
]"=&r" (x1
), [y1
]"=&r" (y1
), [b
] "+r" (b
)
159 : [a
] "r" (a
), [t
] "r" (t
), [v
] "r" (v
)
165 #elif defined CPU_COLDFIRE
167 void CMUL(fixed32
*x
, fixed32
*y
,
168 fixed32 a
, fixed32 b
,
169 fixed32 t
, fixed32 v
)
171 asm volatile ("mac.l %[a], %[t], %%acc0;"
172 "msac.l %[b], %[v], %%acc0;"
173 "mac.l %[b], %[t], %%acc1;"
174 "mac.l %[a], %[v], %%acc1;"
175 "movclr.l %%acc0, %[a];"
176 "move.l %[a], (%[x]);"
177 "movclr.l %%acc1, %[a];"
178 "move.l %[a], (%[y]);"
180 : [x
] "a" (x
), [y
] "a" (y
),
181 [b
] "r" (b
), [t
] "r" (t
), [v
] "r" (v
)
186 void CMUL(fixed32
*pre
,
198 fixed32 _r1
= fixmul32b(_bref
, _aref
);
199 fixed32 _r2
= fixmul32b(_bimf
, _aimf
);
200 fixed32 _r3
= fixmul32b(_bref
, _aimf
);
201 fixed32 _r4
= fixmul32b(_bimf
, _aref
);