/* apps/codecs/libmad/synth.c (maemo-rb.git, blob ab2bcbe829d1950a97a1f3ed82b705d49ac2b00f) */
1 /*
2 * libmad - MPEG audio decoder library
3 * Copyright (C) 2000-2004 Underbit Technologies, Inc.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * $Id$
22 # ifdef HAVE_CONFIG_H
23 # include "config.h"
24 # endif
26 # include "global.h"
28 # include "fixed.h"
29 # include "frame.h"
30 # include "synth.h"
33 * NAME: synth->init()
34 * DESCRIPTION: initialize synth struct
36 void mad_synth_init(struct mad_synth *synth)
38 mad_synth_mute(synth);
40 synth->phase = 0;
42 synth->pcm.samplerate = 0;
43 synth->pcm.channels = 0;
44 synth->pcm.length = 0;
45 #if defined(CPU_COLDFIRE)
46 /* init the emac unit here, since this function should always be called
47 before using libmad */
48 coldfire_set_macsr(EMAC_FRACTIONAL | EMAC_SATURATE | EMAC_ROUND);
49 #endif
53 * NAME: synth->mute()
54 * DESCRIPTION: zero all polyphase filterbank values, resetting synthesis
56 void mad_synth_mute(struct mad_synth *synth)
58 memset(synth->filter, 0, sizeof(synth->filter));
61 #if 0 /* dct32 asm implementation is slower on current arm systems */
62 /* #ifdef FPM_ARM */
64 void dct32(mad_fixed_t const in[32], unsigned int slot,
65 mad_fixed_t lo[16][8], mad_fixed_t hi[16][8]);
67 #else
70 * An optional optimization called here the Subband Synthesis Optimization
71 * (SSO) improves the performance of subband synthesis at the expense of
72 * accuracy.
74 * The idea is to simplify 32x32->64-bit multiplication to 32x32->32 such
75 * that extra scaling and rounding are not necessary. This often allows the
76 * compiler to use faster 32-bit multiply-accumulate instructions instead of
77 * explicit 64-bit multiply, shift, and add instructions.
79 * SSO works like this: a full 32x32->64-bit multiply of two mad_fixed_t
80 * values requires the result to be right-shifted 28 bits to be properly
81 * scaled to the same fixed-point format. Right shifts can be applied at any
82 * time to either operand or to the result, so the optimization involves
83 * careful placement of these shifts to minimize the loss of accuracy.
85 * First, a 14-bit shift is applied with rounding at compile-time to the D[]
86 * table of coefficients for the subband synthesis window. This only loses 2
87 * bits of accuracy because the lower 12 bits are always zero. A second
88 * 12-bit shift occurs after the DCT calculation. This loses 12 bits of
89 * accuracy. Finally, a third 2-bit shift occurs just before the sample is
90 * saved in the PCM buffer. 14 + 12 + 2 == 28 bits.
/* FPM_DEFAULT without OPT_SSO will actually not work. */
# if defined(FPM_DEFAULT) && !defined(OPT_SSO)
#  define OPT_SSO
# endif

/* second SSO shift, with rounding */
# if defined(OPT_SSO)
#  define SHIFT(x)  (((x) + (1L << 11)) >> 12)
# else
#  define SHIFT(x)  (x)
# endif

/* possible DCT speed optimization */

# if defined(FPM_COLDFIRE_EMAC)
/* This is a Coldfire version of the OPT_SPEED optimisation below, but in the
   case of Coldfire it does not loose additional precision. */
/* single fractional multiply through the EMAC accumulator; the high half of
   the product is read back with movclr (which also clears acc0) */
#  define MUL(x, y) \
   ({ \
      mad_fixed64hi_t hi; \
      asm volatile("mac.l %[a], %[b], %%acc0\n\t" \
                   "movclr.l %%acc0, %[hi]" \
                   : [hi] "=r" (hi) \
                   : [a] "r" ((x)), [b] "r" ((y))); \
      hi; \
   })
# elif defined(FPM_ARM)
/* This is an ARM version of the OPT_SPEED optimisation below. This
   implementation will loose 1 bit of accuracy. */
/* smull produces the 64-bit product; the high word is doubled (lsl #1)
   to compensate for the sign bit, losing the noted 1 bit of accuracy */
#  define MUL(x, y) \
   ({ \
      mad_fixed64lo_t lo; \
      mad_fixed64hi_t hi; \
      asm volatile ( \
         "smull %[lo], %[hi], %[a], %[b]\n\t" /* hi = result */ \
         "mov %[hi], %[hi], lsl #1" /* hi <<= 1 */ \
         : [lo]"=&r"(lo), [hi]"=&r"(hi) \
         : [a]"r"(x), [b]"r"(y)); \
      hi; \
   })
# elif defined(OPT_SPEED) && defined(MAD_F_MLX)
/* keep only the high word of the full product, rescaled; trades accuracy
   for speed */
#  define MUL(x, y) \
   ({ mad_fixed64hi_t hi; \
      mad_fixed64lo_t lo; \
      MAD_F_MLX(hi, lo, (x), (y)); \
      hi << (32 - MAD_F_SCALEBITS - 3); \
   })
# else
/* fallback: full-precision multiply; y is pre-shifted by 3 to match the
   scaling of the fast paths above */
#  define MUL(x, y)  mad_f_mul((x), (y>>3))
# endif
/*
 * NAME:	dct32()
 * DESCRIPTION:	perform fast in[32]->out[32] DCT
 *
 * Results are written interleaved into the polyphase filterbank slices
 * hi[band][slot] / lo[band][slot]; the statement order below is the
 * factorized DCT dataflow and must not be rearranged.
 */
static
void dct32(mad_fixed_t const in[32], unsigned int slot,
	   mad_fixed_t lo[16][8], mad_fixed_t hi[16][8])
{
  mad_fixed_t t0,   t1,   t2,   t3,   t4,   t5,   t6,   t7;
  mad_fixed_t t8,   t9,   t10,  t11,  t12,  t13,  t14,  t15;
  mad_fixed_t t16,  t17,  t18,  t19,  t20,  t21,  t22,  t23;
  mad_fixed_t t24,  t25,  t26,  t27,  t28,  t29,  t30,  t31;
  mad_fixed_t t32,  t33,  t34,  t35,  t36,  t37,  t38,  t39;
  mad_fixed_t t40,  t41,  t42,  t43,  t44,  t45,  t46,  t47;
  mad_fixed_t t48,  t49,  t50,  t51,  t52,  t53,  t54,  t55;
  mad_fixed_t t56,  t57,  t58,  t59,  t60,  t61,  t62,  t63;
  mad_fixed_t t64,  t65,  t66,  t67,  t68,  t69,  t70,  t71;
  mad_fixed_t t72,  t73,  t74,  t75,  t76,  t77,  t78,  t79;
  mad_fixed_t t80,  t81,  t82,  t83,  t84,  t85,  t86,  t87;
  mad_fixed_t t88,  t89,  t90,  t91,  t92,  t93,  t94,  t95;
  mad_fixed_t t96,  t97,  t98,  t99,  t100, t101, t102, t103;
  mad_fixed_t t104, t105, t106, t107, t108, t109, t110, t111;
  mad_fixed_t t112, t113, t114, t115, t116, t117, t118, t119;
  mad_fixed_t t120, t121, t122, t123, t124, t125, t126, t127;
  mad_fixed_t t128, t129, t130, t131, t132, t133, t134, t135;
  mad_fixed_t t136, t137, t138, t139, t140, t141, t142, t143;
  mad_fixed_t t144, t145, t146, t147, t148, t149, t150, t151;
  mad_fixed_t t152, t153, t154, t155, t156, t157, t158, t159;
  mad_fixed_t t160, t161, t162, t163, t164, t165, t166, t167;
  mad_fixed_t t168, t169, t170, t171, t172, t173, t174, t175;
  mad_fixed_t t176;

  /* costab[i] = cos(PI / (2 * 32) * i) */

#define costab1  MAD_F(0x7fd8878e)  /* 0.998795456 */
#define costab2  MAD_F(0x7f62368f)  /* 0.995184727 */
#define costab3  MAD_F(0x7e9d55fc)  /* 0.989176510 */
#define costab4  MAD_F(0x7d8a5f40)  /* 0.980785280 */
#define costab5  MAD_F(0x7c29fbee)  /* 0.970031253 */
#define costab6  MAD_F(0x7a7d055b)  /* 0.956940336 */
#define costab7  MAD_F(0x78848414)  /* 0.941544065 */
#define costab8  MAD_F(0x7641af3d)  /* 0.923879533 */
#define costab9  MAD_F(0x73b5ebd1)  /* 0.903989293 */
#define costab10 MAD_F(0x70e2cbc6)  /* 0.881921264 */
#define costab11 MAD_F(0x6dca0d14)  /* 0.857728610 */
#define costab12 MAD_F(0x6a5d98a4)  /* 0.831469612 */
#define costab13 MAD_F(0x66cf8120)  /* 0.803207531 */
#define costab14 MAD_F(0x62f201ac)  /* 0.773010453 */
#define costab15 MAD_F(0x5ed77c8a)  /* 0.740951125 */
#define costab16 MAD_F(0x5a82799a)  /* 0.707106781 */
#define costab17 MAD_F(0x55f5a4d2)  /* 0.671558955 */
#define costab18 MAD_F(0x5133cc94)  /* 0.634393284 */
#define costab19 MAD_F(0x4c3fdff4)  /* 0.595699304 */
#define costab20 MAD_F(0x471cece7)  /* 0.555570233 */
#define costab21 MAD_F(0x41ce1e65)  /* 0.514102744 */
#define costab22 MAD_F(0x3c56ba70)  /* 0.471396737 */
#define costab23 MAD_F(0x36ba2014)  /* 0.427555093 */
#define costab24 MAD_F(0x30fbc54d)  /* 0.382683432 */
#define costab25 MAD_F(0x2b1f34eb)  /* 0.336889853 */
#define costab26 MAD_F(0x25280c5e)  /* 0.290284677 */
#define costab27 MAD_F(0x1f19f97b)  /* 0.242980180 */
#define costab28 MAD_F(0x18f8b83c)  /* 0.195090322 */
#define costab29 MAD_F(0x12c8106f)  /* 0.146730474 */
#define costab30 MAD_F(0x0c8bd35e)  /* 0.098017140 */
#define costab31 MAD_F(0x0647d97c)  /* 0.049067674 */

  /* first stage: 16 butterflies pairing in[i] with in[31 - i] */
  t0   = in[0]  + in[31];  t16  = MUL(in[0]  - in[31],  costab1);
  t1   = in[15] + in[16];  t17  = MUL(in[15] - in[16],  costab31);

  t41  = t16 + t17;
  t59  = MUL(t16 - t17, costab2);
  t33  = t0  + t1;
  t50  = MUL(t0  - t1,  costab2);

  t2   = in[7]  + in[24];  t18  = MUL(in[7]  - in[24],  costab15);
  t3   = in[8]  + in[23];  t19  = MUL(in[8]  - in[23],  costab17);

  t42  = t18 + t19;
  t60  = MUL(t18 - t19, costab30);
  t34  = t2  + t3;
  t51  = MUL(t2  - t3,  costab30);

  t4   = in[3]  + in[28];  t20  = MUL(in[3]  - in[28],  costab7);
  t5   = in[12] + in[19];  t21  = MUL(in[12] - in[19],  costab25);

  t43  = t20 + t21;
  t61  = MUL(t20 - t21, costab14);
  t35  = t4  + t5;
  t52  = MUL(t4  - t5,  costab14);

  t6   = in[4]  + in[27];  t22  = MUL(in[4]  - in[27],  costab9);
  t7   = in[11] + in[20];  t23  = MUL(in[11] - in[20],  costab23);

  t44  = t22 + t23;
  t62  = MUL(t22 - t23, costab18);
  t36  = t6  + t7;
  t53  = MUL(t6  - t7,  costab18);

  t8   = in[1]  + in[30];  t24  = MUL(in[1]  - in[30],  costab3);
  t9   = in[14] + in[17];  t25  = MUL(in[14] - in[17],  costab29);

  t45  = t24 + t25;
  t63  = MUL(t24 - t25, costab6);
  t37  = t8  + t9;
  t54  = MUL(t8  - t9,  costab6);

  t10  = in[6]  + in[25];  t26  = MUL(in[6]  - in[25],  costab13);
  t11  = in[9]  + in[22];  t27  = MUL(in[9]  - in[22],  costab19);

  t46  = t26 + t27;
  t64  = MUL(t26 - t27, costab26);
  t38  = t10 + t11;
  t55  = MUL(t10 - t11, costab26);

  t12  = in[2]  + in[29];  t28  = MUL(in[2]  - in[29],  costab5);
  t13  = in[13] + in[18];  t29  = MUL(in[13] - in[18],  costab27);

  t47  = t28 + t29;
  t65  = MUL(t28 - t29, costab10);
  t39  = t12 + t13;
  t56  = MUL(t12 - t13, costab10);

  t14  = in[5]  + in[26];  t30  = MUL(in[5]  - in[26],  costab11);
  t15  = in[10] + in[21];  t31  = MUL(in[10] - in[21],  costab21);

  t48  = t30 + t31;
  t66  = MUL(t30 - t31, costab22);
  t40  = t14 + t15;
  t57  = MUL(t14 - t15, costab22);

  /* middle stages: recombine 8-point groups */
  t69  = t33 + t34;  t89  = MUL(t33 - t34, costab4);
  t70  = t35 + t36;  t90  = MUL(t35 - t36, costab28);
  t71  = t37 + t38;  t91  = MUL(t37 - t38, costab12);
  t72  = t39 + t40;  t92  = MUL(t39 - t40, costab20);
  t73  = t41 + t42;  t94  = MUL(t41 - t42, costab4);
  t74  = t43 + t44;  t95  = MUL(t43 - t44, costab28);
  t75  = t45 + t46;  t96  = MUL(t45 - t46, costab12);
  t76  = t47 + t48;  t97  = MUL(t47 - t48, costab20);

  t78  = t50 + t51;  t100 = MUL(t50 - t51, costab4);
  t79  = t52 + t53;  t101 = MUL(t52 - t53, costab28);
  t80  = t54 + t55;  t102 = MUL(t54 - t55, costab12);
  t81  = t56 + t57;  t103 = MUL(t56 - t57, costab20);

  t83  = t59 + t60;  t106 = MUL(t59 - t60, costab4);
  t84  = t61 + t62;  t107 = MUL(t61 - t62, costab28);
  t85  = t63 + t64;  t108 = MUL(t63 - t64, costab12);
  t86  = t65 + t66;  t109 = MUL(t65 - t66, costab20);

  /* final stages: outputs are stored as they become available; the
     /* k */ comments give the DCT output index for each store */
  t113 = t69  + t70;
  t114 = t71  + t72;

  /*  0 */ hi[15][slot] = SHIFT(t113 + t114);
  /* 16 */ lo[ 0][slot] = SHIFT(MUL(t113 - t114, costab16));

  t115 = t73  + t74;
  t116 = t75  + t76;

  t32  = t115 + t116;

  /*  1 */ hi[14][slot] = SHIFT(t32);

  t118 = t78  + t79;
  t119 = t80  + t81;

  t58  = t118 + t119;

  /*  2 */ hi[13][slot] = SHIFT(t58);

  t121 = t83  + t84;
  t122 = t85  + t86;

  t67  = t121 + t122;

  t49  = (t67 * 2) - t32;

  /*  3 */ hi[12][slot] = SHIFT(t49);

  t125 = t89  + t90;
  t126 = t91  + t92;

  t93  = t125 + t126;

  /*  4 */ hi[11][slot] = SHIFT(t93);

  t128 = t94  + t95;
  t129 = t96  + t97;

  t98  = t128 + t129;

  t68  = (t98 * 2) - t49;

  /*  5 */ hi[10][slot] = SHIFT(t68);

  t132 = t100 + t101;
  t133 = t102 + t103;

  t104 = t132 + t133;

  t82  = (t104 * 2) - t58;

  /*  6 */ hi[ 9][slot] = SHIFT(t82);

  t136 = t106 + t107;
  t137 = t108 + t109;

  t110 = t136 + t137;

  t87  = (t110 * 2) - t67;

  t77  = (t87 * 2) - t68;

  /*  7 */ hi[ 8][slot] = SHIFT(t77);

  t141 = MUL(t69 - t70, costab8);
  t142 = MUL(t71 - t72, costab24);
  t143 = t141 + t142;

  /*  8 */ hi[ 7][slot] = SHIFT(t143);
  /* 24 */ lo[ 8][slot] =
	     SHIFT((MUL(t141 - t142, costab16) * 2) - t143);

  t144 = MUL(t73 - t74, costab8);
  t145 = MUL(t75 - t76, costab24);
  t146 = t144 + t145;

  t88  = (t146 * 2) - t77;

  /*  9 */ hi[ 6][slot] = SHIFT(t88);

  t148 = MUL(t78 - t79, costab8);
  t149 = MUL(t80 - t81, costab24);
  t150 = t148 + t149;

  t105 = (t150 * 2) - t82;

  /* 10 */ hi[ 5][slot] = SHIFT(t105);

  t152 = MUL(t83 - t84, costab8);
  t153 = MUL(t85 - t86, costab24);
  t154 = t152 + t153;

  t111 = (t154 * 2) - t87;

  t99  = (t111 * 2) - t88;

  /* 11 */ hi[ 4][slot] = SHIFT(t99);

  t157 = MUL(t89 - t90, costab8);
  t158 = MUL(t91 - t92, costab24);
  t159 = t157 + t158;

  t127 = (t159 * 2) - t93;

  /* 12 */ hi[ 3][slot] = SHIFT(t127);

  t160 = (MUL(t125 - t126, costab16) * 2) - t127;

  /* 20 */ lo[ 4][slot] = SHIFT(t160);
  /* 28 */ lo[12][slot] =
	     SHIFT((((MUL(t157 - t158, costab16) * 2) - t159) * 2) - t160);

  t161 = MUL(t94 - t95, costab8);
  t162 = MUL(t96 - t97, costab24);
  t163 = t161 + t162;

  t130 = (t163 * 2) - t98;

  t112 = (t130 * 2) - t99;

  /* 13 */ hi[ 2][slot] = SHIFT(t112);

  t164 = (MUL(t128 - t129, costab16) * 2) - t130;

  t166 = MUL(t100 - t101, costab8);
  t167 = MUL(t102 - t103, costab24);
  t168 = t166 + t167;

  t134 = (t168 * 2) - t104;

  t120 = (t134 * 2) - t105;

  /* 14 */ hi[ 1][slot] = SHIFT(t120);

  t135 = (MUL(t118 - t119, costab16) * 2) - t120;

  /* 18 */ lo[ 2][slot] = SHIFT(t135);

  t169 = (MUL(t132 - t133, costab16) * 2) - t134;

  t151 = (t169 * 2) - t135;

  /* 22 */ lo[ 6][slot] = SHIFT(t151);

  t170 = (((MUL(t148 - t149, costab16) * 2) - t150) * 2) - t151;

  /* 26 */ lo[10][slot] = SHIFT(t170);
  /* 30 */ lo[14][slot] =
	     SHIFT((((((MUL(t166 - t167, costab16) * 2) -
		       t168) * 2) - t169) * 2) - t170);

  t171 = MUL(t106 - t107, costab8);
  t172 = MUL(t108 - t109, costab24);
  t173 = t171 + t172;

  t138 = (t173 * 2) - t110;

  t123 = (t138 * 2) - t111;

  t139 = (MUL(t121 - t122, costab16) * 2) - t123;

  t117 = (t123 * 2) - t112;

  /* 15 */ hi[ 0][slot] = SHIFT(t117);

  t124 = (MUL(t115 - t116, costab16) * 2) - t117;

  /* 17 */ lo[ 1][slot] = SHIFT(t124);

  t131 = (t139 * 2) - t124;

  /* 19 */ lo[ 3][slot] = SHIFT(t131);

  t140 = (t164 * 2) - t131;

  /* 21 */ lo[ 5][slot] = SHIFT(t140);

  t174 = (MUL(t136 - t137, costab16) * 2) - t138;

  t155 = (t174 * 2) - t139;

  t147 = (t155 * 2) - t140;

  /* 23 */ lo[ 7][slot] = SHIFT(t147);

  t156 = (((MUL(t144 - t145, costab16) * 2) - t146) * 2) - t147;

  /* 25 */ lo[ 9][slot] = SHIFT(t156);

  t175 = (((MUL(t152 - t153, costab16) * 2) - t154) * 2) - t155;

  t165 = (t175 * 2) - t156;

  /* 27 */ lo[11][slot] = SHIFT(t165);

  t176 = (((((MUL(t161 - t162, costab16) * 2) -
	     t163) * 2) - t164) * 2) - t165;

  /* 29 */ lo[13][slot] = SHIFT(t176);
  /* 31 */ lo[15][slot] =
	     SHIFT((((((((MUL(t171 - t172, costab16) * 2) -
			 t173) * 2) - t174) * 2) - t175) * 2) - t176);
}

/*
 * Totals:
 *  80 multiplies
 *  80 additions
 * 119 subtractions
 *  49 shifts (not counting SSO)
 */

# undef MUL
# undef SHIFT
507 #endif
/* third SSO shift and/or D[] optimization preshift */

# if defined(OPT_SSO)
#  if MAD_F_FRACBITS != 28
#   error "MAD_F_FRACBITS must be 28 to use OPT_SSO"
#  endif
/* with SSO the MAC chain is plain 32-bit arithmetic in `lo'; `hi' is unused */
#  define ML0(hi, lo, x, y)	((lo)  = (x) * (y))
#  define MLA(hi, lo, x, y)	((lo) += (x) * (y))
#  define MLN(hi, lo)		((lo)  = -(lo))
#  define MLZ(hi, lo)		((void) (hi), (mad_fixed_t) (lo))
#  define SHIFT(x)		((x) >> 2)
/* compile-time 14-bit preshift (with rounding) of the D[] coefficients */
#  define PRESHIFT(x)		((MAD_F(x) + (1L << 13)) >> 14)
# else
/* full-precision path: delegate to the platform's 64-bit MAC primitives */
#  define ML0(hi, lo, x, y)	MAD_F_ML0((hi), (lo), (x), (y))
#  define MLA(hi, lo, x, y)	MAD_F_MLA((hi), (lo), (x), (y))
#  define MLN(hi, lo)		MAD_F_MLN((hi), (lo))
#  define MLZ(hi, lo)		MAD_F_MLZ((hi), (lo))
#  define SHIFT(x)		(x)
#  if defined(MAD_F_SCALEBITS)
#   undef  MAD_F_SCALEBITS
#   define MAD_F_SCALEBITS	(MAD_F_FRACBITS - 12)
#   define PRESHIFT(x)		(MAD_F(x) >> 12)
#  else
#   define PRESHIFT(x)		MAD_F(x)
#  endif
# endif
/* synthesis window coefficients, 17 rows of 32; the data itself is generated
   externally and included from D.dat (values pass through PRESHIFT there --
   NOTE(review): confirm against D.dat) */
static
mad_fixed_t const D[17][32] ICONST_ATTR = {
# include "D.dat"
};
542 * NAME: synth->full()
543 * DESCRIPTION: perform full frequency PCM synthesis
545 /* optimised version of synth_full */
546 # ifdef FPM_COLDFIRE_EMAC
/* 8-tap MAC into EMAC acc0:
   res = f1[0]*pD[0] + f1[1]*pD[14] + f1[2]*pD[12] + ... + f1[7]*pD[2]
   (byte offsets 0,56,48,...,8 into the D row); movclr reads and clears acc0 */
#define SYNTH_EMAC1(res, f1, pD) \
    asm volatile( \
       "movem.l (%0), %%d0-%%d7 \n\t" \
       "move.l (%1), %%a5 \n\t" \
       "mac.l %%d0, %%a5, 56(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d1, %%a5, 48(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d2, %%a5, 40(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d3, %%a5, 32(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d4, %%a5, 24(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d5, %%a5, 16(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d6, %%a5, 8(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d7, %%a5, %%acc0\n\t" \
       : \
       : "a" (*f1), "a" (*pD) \
       : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5"); \
    asm volatile ( \
       "movclr.l %%acc0, %0 \n\t" \
       : "=d" (res));
/* 16-tap MAC into EMAC acc0:
   res = -(f1 . D_odd) + (f2 . D_even), where D_odd are the odd D-row columns
   (byte offsets 4,60,52,...,12) applied with msac (subtract) and D_even the
   even columns (0,56,...,8) applied with mac (add) */
#define SYNTH_EMAC2(res, f1, f2, pD) \
    asm volatile( \
       "movem.l (%0), %%d0-%%d7 \n\t" \
       "move.l 4(%1), %%a5 \n\t" \
       "msac.l %%d0, %%a5, 60(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d1, %%a5, 52(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d2, %%a5, 44(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d3, %%a5, 36(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d4, %%a5, 28(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d5, %%a5, 20(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d6, %%a5, 12(%1), %%a5, %%acc0\n\t" \
       "msac.l %%d7, %%a5, (%1), %%a5, %%acc0\n\t" \
       "movem.l (%2), %%d0-%%d7 \n\t" \
       "mac.l %%d0, %%a5, 56(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d1, %%a5, 48(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d2, %%a5, 40(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d3, %%a5, 32(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d4, %%a5, 24(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d5, %%a5, 16(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d6, %%a5, 8(%1), %%a5, %%acc0\n\t" \
       "mac.l %%d7, %%a5, %%acc0\n\t" \
       : \
       : "a" (*f1), "a" (*pD), "a" (*f2) \
       : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
    asm volatile ( \
       "movclr.l %%acc0, %0 \n\t" \
       : "=d" (res));
/* computes two PCM samples of an odd time slot at once using both EMAC
   accumulators: res1 (acc0) windows f1/f2 against the D row at pD1,
   res2 (acc1) reuses the same filter values against mirrored columns of the
   row at pD2 (exploiting D[32 - sb][i] == -D[sb][31 - i]) */
#define SYNTH_EMAC_ODD_SBSAMPLE(f1, f2, pD1, pD2, res1, res2) \
    asm volatile ( \
       "movem.l (%0), %%d0-%%d7 \n\t" \
       "move.l 4(%2), %%a5 \n\t" \
       "msac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d7, %%a5, 112(%3), %%a5, %%acc0\n\t" \
       "mac.l %%d7, %%a5, 104(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d6, %%a5, 96(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d5, %%a5, 88(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d4, %%a5, 80(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d3, %%a5, 72(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d2, %%a5, 64(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d1, %%a5, 120(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d0, %%a5, 8(%2), %%a5, %%acc1\n\t" \
       "movem.l (%1), %%d0-%%d7 \n\t" \
       "mac.l %%d7, %%a5, 16(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d6, %%a5, 24(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d5, %%a5, 32(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d4, %%a5, 40(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d3, %%a5, 48(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d2, %%a5, 56(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d1, %%a5, (%2), %%a5, %%acc0\n\t" \
       "mac.l %%d0, %%a5, 60(%3), %%a5, %%acc0\n\t" \
       "mac.l %%d0, %%a5, 68(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d1, %%a5, 76(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d2, %%a5, 84(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d3, %%a5, 92(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d4, %%a5, 100(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d5, %%a5, 108(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d6, %%a5, 116(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d7, %%a5, %%acc1\n\t" \
       : \
       : "a" (*f1), "a" (*f2), "a" (*pD1), "a" (*pD2) \
       : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
    asm volatile( \
       "movclr.l %%acc0, %0\n\t" \
       "movclr.l %%acc1, %1\n\t" \
       : "=d" (res1), "=d" (res2) );
/* even-slot counterpart of SYNTH_EMAC_ODD_SBSAMPLE: same dual-accumulator
   scheme but with even/odd D-row column roles exchanged */
#define SYNTH_EMAC_EVEN_SBSAMPLE(f1, f2, pD1, pD2, res1, res2) \
    asm volatile ( \
       "movem.l (%0), %%d0-%%d7 \n\t" \
       "move.l (%2), %%a5 \n\t" \
       "msac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t" \
       "msac.l %%d7, %%a5, 116(%3), %%a5, %%acc0\n\t" \
       "mac.l %%d7, %%a5, 108(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d6, %%a5, 100(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d5, %%a5, 92(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d4, %%a5, 84(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d3, %%a5, 76(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d2, %%a5, 68(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d1, %%a5, 60(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d0, %%a5, 12(%2), %%a5, %%acc1\n\t" \
       "movem.l (%1), %%d0-%%d7 \n\t" \
       "mac.l %%d7, %%a5, 20(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d6, %%a5, 28(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d5, %%a5, 36(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d4, %%a5, 44(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d3, %%a5, 52(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d2, %%a5, 60(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d1, %%a5, 4(%2), %%a5, %%acc0\n\t" \
       "mac.l %%d0, %%a5, 120(%3), %%a5, %%acc0\n\t" \
       "mac.l %%d0, %%a5, 64(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d1, %%a5, 72(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d2, %%a5, 80(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d3, %%a5, 88(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d4, %%a5, 96(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d5, %%a5, 104(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d6, %%a5, 112(%3), %%a5, %%acc1\n\t" \
       "mac.l %%d7, %%a5, %%acc1\n\t" \
       : \
       : "a" (*f1), "a" (*f2), "a" (*pD1), "a" (*pD2) \
       : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
    asm volatile( \
       "movclr.l %%acc0, %0\n\t" \
       "movclr.l %%acc1, %1\n\t" \
       : "=d" (res1), "=d" (res2) );
/* ColdFire EMAC version of synth_full: windows the DCT output held in the
   polyphase filter history against the D[] table, producing 32 PCM samples
   per granule slot; the `<< 3' on each stored value scales the EMAC result
   back to libmad's fixed-point format */
static
void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
		unsigned int nch, unsigned int ns)
{
  int sb;
  unsigned int phase, ch, s, p;
  mad_fixed_t *pcm, (*filter)[2][2][16][8];
  mad_fixed_t (*sbsample)[36][32];
  mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
  mad_fixed_t const (*D0ptr)[32];
  mad_fixed_t const (*D1ptr)[32];
  mad_fixed64hi_t hi0, hi1;

  for (ch = 0; ch < nch; ++ch) {
    sbsample = &(*frame->sbsample_prev)[ch];
    filter   = &synth->filter[ch];
    phase    = synth->phase;
    pcm      = synth->pcm.samples[ch];

    for (s = 0; s < ns; ++s) {
      dct32((*sbsample)[s], phase >> 1,
	    (*filter)[0][phase & 1], (*filter)[1][phase & 1]);

      p = (phase - 1) & 0xf;

      /* calculate 32 samples */
      fe = &(*filter)[0][ phase & 1][0];
      fx = &(*filter)[0][~phase & 1][0];
      fo = &(*filter)[1][~phase & 1][0];

      D0ptr = (void*)&D[0][ p];
      D1ptr = (void*)&D[0][-p];

      if(s & 1)
      {
        SYNTH_EMAC2(hi0, fx, fe, D0ptr);
        pcm[0] = hi0 << 3; /* shift result to libmad's fixed point format */
        pcm += 16;

        for (sb = 15; sb; sb--, fo++) {
          ++fe;
          ++D0ptr;
          ++D1ptr;

          /* D[32 - sb][i] == -D[sb][31 - i] */
          SYNTH_EMAC_ODD_SBSAMPLE(fo, fe, D0ptr, D1ptr, hi0, hi1);
          pcm[-sb] = hi0 << 3;
          pcm[ sb] = hi1 << 3;
        }

        ++D0ptr;
        SYNTH_EMAC1(hi0, fo, D0ptr+1);
        pcm[0] = -(hi0 << 3);
      }
      else
      {
        /* argument swap plus the negated store implements the even-slot
           window; compare the generic C version below */
        SYNTH_EMAC2(hi0, fe, fx, D0ptr);
        pcm[0] = -(hi0 << 3); /* shift result to libmad's fixed point format */
        pcm += 16;

        for (sb = 15; sb; sb--, fo++) {
          ++fe;
          ++D0ptr;
          ++D1ptr;

          /* D[32 - sb][i] == -D[sb][31 - i] */
          SYNTH_EMAC_EVEN_SBSAMPLE(fo, fe, D0ptr, D1ptr, hi0, hi1);
          pcm[-sb] = hi0 << 3;
          pcm[ sb] = hi1 << 3;
        }

        ++D0ptr;
        SYNTH_EMAC1(hi0, fo, D0ptr);
        pcm[0] = -(hi0 << 3);
      }

      pcm  += 16;
      phase = (phase + 1) % 16;
    }
  }
}
766 #elif defined(FPM_ARM)
/* ARM: start a 64-bit MAC chain (smull then seven smlal) of f[0..7] against
   eight D-row entries at descending even byte offsets 0,56,48,...,8 from ptr;
   the ldmia with writeback advances __p through f */
#define PROD_O(hi, lo, f, ptr) \
({ \
    mad_fixed_t *__p = (f); \
    asm volatile ( \
       "ldmia %2!, {r0, r1, r2, r3} \n\t" \
       "ldr r4, [%3, #0] \n\t" \
       "smull %0, %1, r0, r4 \n\t" \
       "ldr r4, [%3, #56] \n\t" \
       "smlal %0, %1, r1, r4 \n\t" \
       "ldr r4, [%3, #48] \n\t" \
       "smlal %0, %1, r2, r4 \n\t" \
       "ldr r4, [%3, #40] \n\t" \
       "smlal %0, %1, r3, r4 \n\t" \
       "ldmia %2, {r0, r1, r2, r3} \n\t" \
       "ldr r4, [%3, #32] \n\t" \
       "smlal %0, %1, r0, r4 \n\t" \
       "ldr r4, [%3, #24] \n\t" \
       "smlal %0, %1, r1, r4 \n\t" \
       "ldr r4, [%3, #16] \n\t" \
       "smlal %0, %1, r2, r4 \n\t" \
       "ldr r4, [%3, #8] \n\t" \
       "smlal %0, %1, r3, r4 \n\t" \
       : "=&r" (lo), "=&r" (hi), "+r" (__p) \
       : "r" (ptr) \
       : "r0", "r1", "r2", "r3", "r4", "memory"); \
})
/* ARM: like PROD_O but accumulates into an existing hi:lo pair (all smlal,
   no initial smull) */
#define PROD_A(hi, lo, f, ptr) \
({ \
    mad_fixed_t *__p = (f); \
    asm volatile ( \
       "ldmia %2!, {r0, r1, r2, r3} \n\t" \
       "ldr r4, [%3, #0] \n\t" \
       "smlal %0, %1, r0, r4 \n\t" \
       "ldr r4, [%3, #56] \n\t" \
       "smlal %0, %1, r1, r4 \n\t" \
       "ldr r4, [%3, #48] \n\t" \
       "smlal %0, %1, r2, r4 \n\t" \
       "ldr r4, [%3, #40] \n\t" \
       "smlal %0, %1, r3, r4 \n\t" \
       "ldmia %2, {r0, r1, r2, r3} \n\t" \
       "ldr r4, [%3, #32] \n\t" \
       "smlal %0, %1, r0, r4 \n\t" \
       "ldr r4, [%3, #24] \n\t" \
       "smlal %0, %1, r1, r4 \n\t" \
       "ldr r4, [%3, #16] \n\t" \
       "smlal %0, %1, r2, r4 \n\t" \
       "ldr r4, [%3, #8] \n\t" \
       "smlal %0, %1, r3, r4 \n\t" \
       : "+r" (lo), "+r" (hi), "+r" (__p) \
       : "r" (ptr) \
       : "r0", "r1", "r2", "r3", "r4", "memory"); \
})
/* inner-loop workers for the 15 middle subbands of one slot; no definition
   in this file -- presumably implemented in ARM assembler elsewhere in the
   codec (NOTE(review): confirm at link time) */
void synth_full_odd_sbsample (mad_fixed_t *pcm,
                              mad_fixed_t (*fo)[8],
                              mad_fixed_t (*fe)[8],
                              mad_fixed_t const (*D0ptr)[32],
                              mad_fixed_t const (*D1ptr)[32]);
void synth_full_even_sbsample(mad_fixed_t *pcm,
                              mad_fixed_t (*fo)[8],
                              mad_fixed_t (*fe)[8],
                              mad_fixed_t const (*D0ptr)[32],
                              mad_fixed_t const (*D1ptr)[32]);
/* ARM version of synth_full: first and last sample of each slot are computed
   inline with PROD_O/PROD_A; the 15 middle subbands are delegated to the
   external synth_full_{odd,even}_sbsample workers, after which the local
   pointers are advanced past the range those workers consumed */
static
void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
		unsigned int nch, unsigned int ns) ICODE_ATTR_MPA_SYNTH;
static
void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
		unsigned int nch, unsigned int ns)
{
  int p;
  unsigned int phase, ch, s;
  mad_fixed_t *pcm, (*filter)[2][2][16][8];
  mad_fixed_t (*sbsample)[36][32];
  mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
  mad_fixed_t const (*D0ptr)[32], *ptr;
  mad_fixed_t const (*D1ptr)[32];
  mad_fixed64hi_t hi;
  mad_fixed64lo_t lo;

  for (ch = 0; ch < nch; ++ch) {
    sbsample = &(*frame->sbsample_prev)[ch];
    filter   = &synth->filter[ch];
    phase    = synth->phase;
    pcm      = synth->pcm.samples[ch];

    for (s = 0; s < ns; ++s) {
      dct32((*sbsample)[s], phase >> 1,
	    (*filter)[0][phase & 1], (*filter)[1][phase & 1]);

      p = (phase - 1) & 0xf;

      /* calculate 32 samples */
      fe = &(*filter)[0][ phase & 1][0];
      fx = &(*filter)[0][~phase & 1][0];
      fo = &(*filter)[1][~phase & 1][0];

      D0ptr = (void*)&D[0][ p];
      D1ptr = (void*)&D[0][-p];

      if(s & 1)
      {
        ptr = *D0ptr;
        PROD_O(hi, lo, *fx, ptr+1);
        MLN(hi, lo);
        PROD_A(hi, lo, *fe, ptr);
        pcm[0] = SHIFT(MLZ(hi, lo));
        pcm += 16;

        synth_full_odd_sbsample(pcm, fo, fe, D0ptr, D1ptr);
        D0ptr += 15;
        D1ptr += 15;
        fo    += 15;
        fe    += 15;

        ptr = *(D0ptr + 1);
        PROD_O(hi, lo, *fo, ptr+1);
        pcm[0] = SHIFT(-MLZ(hi, lo));
      }
      else
      {
        ptr = *D0ptr;
        PROD_O(hi, lo, *fx, ptr);
        MLN(hi, lo);
        PROD_A(hi, lo, *fe, ptr+1);
        pcm[0] = SHIFT(MLZ(hi, lo));
        pcm += 16;

        synth_full_even_sbsample(pcm, fo, fe, D0ptr, D1ptr);
        D0ptr += 15;
        D1ptr += 15;
        fo    += 15;
        fe    += 15;

        ptr = *(D0ptr + 1);
        PROD_O(hi, lo, *fo, ptr);
        pcm[0] = SHIFT(-MLZ(hi, lo));
      }

      pcm  += 16;
      phase = (phase + 1) % 16;
    }
  }
}
915 # else /* not FPM_COLDFIRE_EMAC and not FPM_ARM */
/* generic: start an 8-term MAC chain of f[0..7] against D-row entries at
   descending even indices (offset selects the even/odd column set) */
#define PROD_O(hi, lo, f, ptr, offset) \
    ML0(hi, lo, (*f)[0], ptr[ 0+offset]); \
    MLA(hi, lo, (*f)[1], ptr[14+offset]); \
    MLA(hi, lo, (*f)[2], ptr[12+offset]); \
    MLA(hi, lo, (*f)[3], ptr[10+offset]); \
    MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
    MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
    MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
    MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
/* generic: like PROD_O but accumulates onto an existing hi/lo chain
   (MLA throughout, no ML0) */
#define PROD_A(hi, lo, f, ptr, offset) \
    MLA(hi, lo, (*f)[0], ptr[ 0+offset]); \
    MLA(hi, lo, (*f)[1], ptr[14+offset]); \
    MLA(hi, lo, (*f)[2], ptr[12+offset]); \
    MLA(hi, lo, (*f)[3], ptr[10+offset]); \
    MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
    MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
    MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
    MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
/* generic: 16-term chain for the mirrored subband sample, combining fe[] with
   ascending window indices and fo[] with descending ones; first_idx/last_idx
   supply the two boundary columns that differ between even and odd slots */
#define PROD_SB(hi, lo, ptr, offset, first_idx, last_idx) \
    ML0(hi, lo, (*fe)[0], ptr[first_idx]); \
    MLA(hi, lo, (*fe)[1], ptr[16+offset]); \
    MLA(hi, lo, (*fe)[2], ptr[18+offset]); \
    MLA(hi, lo, (*fe)[3], ptr[20+offset]); \
    MLA(hi, lo, (*fe)[4], ptr[22+offset]); \
    MLA(hi, lo, (*fe)[5], ptr[24+offset]); \
    MLA(hi, lo, (*fe)[6], ptr[26+offset]); \
    MLA(hi, lo, (*fe)[7], ptr[28+offset]); \
    MLA(hi, lo, (*fo)[7], ptr[29-offset]); \
    MLA(hi, lo, (*fo)[6], ptr[27-offset]); \
    MLA(hi, lo, (*fo)[5], ptr[25-offset]); \
    MLA(hi, lo, (*fo)[4], ptr[23-offset]); \
    MLA(hi, lo, (*fo)[3], ptr[21-offset]); \
    MLA(hi, lo, (*fo)[2], ptr[19-offset]); \
    MLA(hi, lo, (*fo)[1], ptr[17-offset]); \
    MLA(hi, lo, (*fo)[0], ptr[last_idx ]);
/* portable C version of synth_full: for each of ns slots, run the DCT into
   the filter history and window it against D[] to produce 32 PCM samples;
   even and odd slots differ only in which D-row column set (offset 0 or 1)
   pairs with which filter half */
static
void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
		unsigned int nch, unsigned int ns)
{
  int p, sb;
  unsigned int phase, ch, s;
  mad_fixed_t *pcm, (*filter)[2][2][16][8];
  mad_fixed_t (*sbsample)[36][32];
  mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
  mad_fixed_t const (*D0ptr)[32], *ptr;
  mad_fixed_t const (*D1ptr)[32];
  mad_fixed64hi_t hi;
  mad_fixed64lo_t lo;

  for (ch = 0; ch < nch; ++ch) {
    sbsample = &(*frame->sbsample_prev)[ch];
    filter   = &synth->filter[ch];
    phase    = synth->phase;
    pcm      = synth->pcm.samples[ch];

    for (s = 0; s < ns; ++s) {
      dct32((*sbsample)[s], phase >> 1,
	    (*filter)[0][phase & 1], (*filter)[1][phase & 1]);

      p = (phase - 1) & 0xf;

      /* calculate 32 samples */
      fe = &(*filter)[0][ phase & 1][0];
      fx = &(*filter)[0][~phase & 1][0];
      fo = &(*filter)[1][~phase & 1][0];

      D0ptr = (void*)&D[0][ p];
      D1ptr = (void*)&D[0][-p];

      if(s & 1)
      {
        /* subband 0 */
        ptr = *D0ptr;
        PROD_O(hi, lo, fx, ptr, 1)
        MLN(hi, lo);
        PROD_A(hi, lo, fe, ptr, 0)
        pcm[0] = SHIFT(MLZ(hi, lo));
        pcm += 16;

        /* subbands 1..15 and their mirrors 17..31 */
        for (sb = 15; sb; sb--, fo++)
        {
          ++fe;
          ++D0ptr;
          ++D1ptr;

          /* D[32 - sb][i] == -D[sb][31 - i] */
          ptr = *D0ptr;
          PROD_O(hi, lo, fo, ptr, 1)
          MLN(hi, lo);
          PROD_A(hi, lo, fe, ptr, 0)
          pcm[-sb] = SHIFT(MLZ(hi, lo));

          ptr = *D1ptr;
          PROD_SB(hi, lo, ptr, 1, 15, 30)
          pcm[sb] = SHIFT(MLZ(hi, lo));
        }

        /* subband 16 */
        ptr = *(D0ptr + 1);
        PROD_O(hi, lo, fo, ptr, 1)
        pcm[0] = SHIFT(-MLZ(hi, lo));
      }
      else
      {
        /* subband 0 */
        ptr = *D0ptr;
        PROD_O(hi, lo, fx, ptr, 0)
        MLN(hi, lo);
        PROD_A(hi, lo, fe, ptr, 1)
        pcm[0] = SHIFT(MLZ(hi, lo));
        pcm += 16;

        /* subbands 1..15 and their mirrors 17..31 */
        for (sb = 15; sb; sb--, fo++)
        {
          ++fe;
          ++D0ptr;
          ++D1ptr;

          /* D[32 - sb][i] == -D[sb][31 - i] */
          ptr = *D0ptr;
          PROD_O(hi, lo, fo, ptr, 0)
          MLN(hi, lo);
          PROD_A(hi, lo, fe, ptr, 1)
          pcm[-sb] = SHIFT(MLZ(hi, lo));

          ptr = *D1ptr;
          PROD_SB(hi, lo, ptr, 0, 30, 15)
          pcm[sb] = SHIFT(MLZ(hi, lo));
        }

        /* subband 16 */
        ptr = *(D0ptr + 1);
        PROD_O(hi, lo, fo, ptr, 0)
        pcm[0] = SHIFT(-MLZ(hi, lo));
      }

      pcm  += 16;
      phase = (phase + 1) % 16;
    }
  }
}
1057 # endif /* FPM_COLDFIRE_EMAC, FPM_ARM */
1059 #if 0 /* rockbox: unused */
/* NOTE(review): everything down to the matching #endif is excluded from the
   rockbox build; it is kept verbatim for reference against upstream libmad. */
1061 * NAME: synth->half()
1062 * DESCRIPTION: perform half frequency PCM synthesis
1064 static
1065 void synth_half(struct mad_synth *synth, struct mad_frame const *frame,
1066 unsigned int nch, unsigned int ns)
1068 unsigned int phase, ch, s, sb, pe, po;
1069 mad_fixed_t *pcm1, *pcm2, (*filter)[2][2][16][8];
1070 mad_fixed_t (*sbsample)[36][32];
1071 register mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
1072 register mad_fixed_t const (*Dptr)[32], *ptr;
1073 register mad_fixed64hi_t hi;
1074 register mad_fixed64lo_t lo;
1076 for (ch = 0; ch < nch; ++ch) {
1077 sbsample = &(*frame->sbsample_prev)[ch];
1078 filter = &synth->filter[ch];
1079 phase = synth->phase;
1080 pcm1 = synth->pcm.samples[ch];
1082 for (s = 0; s < ns; ++s) {
1083 dct32((*sbsample)[s], phase >> 1,
1084 (*filter)[0][phase & 1], (*filter)[1][phase & 1]);
/* pe/po: even- and odd-phase offsets into the current window (D[]) row,
   derived from the filterbank phase */
1086 pe = phase & ~1;
1087 po = ((phase - 1) & 0xf) | 1;
1089 /* calculate 16 samples */
1091 fe = &(*filter)[0][ phase & 1][0];
1092 fx = &(*filter)[0][~phase & 1][0];
1093 fo = &(*filter)[1][~phase & 1][0];
1095 Dptr = &D[0];
/* first sample: odd-phase taps (fx) are accumulated and negated (MLN),
   then the even-phase taps (fe) are added on top */
1097 ptr = *Dptr + po;
1098 ML0(hi, lo, (*fx)[0], ptr[ 0]);
1099 MLA(hi, lo, (*fx)[1], ptr[14]);
1100 MLA(hi, lo, (*fx)[2], ptr[12]);
1101 MLA(hi, lo, (*fx)[3], ptr[10]);
1102 MLA(hi, lo, (*fx)[4], ptr[ 8]);
1103 MLA(hi, lo, (*fx)[5], ptr[ 6]);
1104 MLA(hi, lo, (*fx)[6], ptr[ 4]);
1105 MLA(hi, lo, (*fx)[7], ptr[ 2]);
1106 MLN(hi, lo);
1108 ptr = *Dptr + pe;
1109 MLA(hi, lo, (*fe)[0], ptr[ 0]);
1110 MLA(hi, lo, (*fe)[1], ptr[14]);
1111 MLA(hi, lo, (*fe)[2], ptr[12]);
1112 MLA(hi, lo, (*fe)[3], ptr[10]);
1113 MLA(hi, lo, (*fe)[4], ptr[ 8]);
1114 MLA(hi, lo, (*fe)[5], ptr[ 6]);
1115 MLA(hi, lo, (*fe)[6], ptr[ 4]);
1116 MLA(hi, lo, (*fe)[7], ptr[ 2]);
1118 *pcm1++ = SHIFT(MLZ(hi, lo));
/* pcm2 fills the output block backward while pcm1 advances forward */
1120 pcm2 = pcm1 + 14;
1122 for (sb = 1; sb < 16; ++sb) {
1123 ++fe;
1124 ++Dptr;
1126 /* D[32 - sb][i] == -D[sb][31 - i] */
/* half-rate: only even sb iterations emit samples -- presumably the
   decimation step; confirm against upstream libmad synth_full */
1128 if (!(sb & 1)) {
1129 ptr = *Dptr + po;
1130 ML0(hi, lo, (*fo)[0], ptr[ 0]);
1131 MLA(hi, lo, (*fo)[1], ptr[14]);
1132 MLA(hi, lo, (*fo)[2], ptr[12]);
1133 MLA(hi, lo, (*fo)[3], ptr[10]);
1134 MLA(hi, lo, (*fo)[4], ptr[ 8]);
1135 MLA(hi, lo, (*fo)[5], ptr[ 6]);
1136 MLA(hi, lo, (*fo)[6], ptr[ 4]);
1137 MLA(hi, lo, (*fo)[7], ptr[ 2]);
1138 MLN(hi, lo);
1140 ptr = *Dptr + pe;
1141 MLA(hi, lo, (*fe)[7], ptr[ 2]);
1142 MLA(hi, lo, (*fe)[6], ptr[ 4]);
1143 MLA(hi, lo, (*fe)[5], ptr[ 6]);
1144 MLA(hi, lo, (*fe)[4], ptr[ 8]);
1145 MLA(hi, lo, (*fe)[3], ptr[10]);
1146 MLA(hi, lo, (*fe)[2], ptr[12]);
1147 MLA(hi, lo, (*fe)[1], ptr[14]);
1148 MLA(hi, lo, (*fe)[0], ptr[ 0]);
1150 *pcm1++ = SHIFT(MLZ(hi, lo));
/* mirrored taps for the backward-filled sample, using the D[] symmetry
   noted above (negative offsets, indices reflected as 31 - i) */
1152 ptr = *Dptr - po;
1153 ML0(hi, lo, (*fo)[7], ptr[31 - 2]);
1154 MLA(hi, lo, (*fo)[6], ptr[31 - 4]);
1155 MLA(hi, lo, (*fo)[5], ptr[31 - 6]);
1156 MLA(hi, lo, (*fo)[4], ptr[31 - 8]);
1157 MLA(hi, lo, (*fo)[3], ptr[31 - 10]);
1158 MLA(hi, lo, (*fo)[2], ptr[31 - 12]);
1159 MLA(hi, lo, (*fo)[1], ptr[31 - 14]);
1160 MLA(hi, lo, (*fo)[0], ptr[31 - 16]);
1162 ptr = *Dptr - pe;
1163 MLA(hi, lo, (*fe)[0], ptr[31 - 16]);
1164 MLA(hi, lo, (*fe)[1], ptr[31 - 14]);
1165 MLA(hi, lo, (*fe)[2], ptr[31 - 12]);
1166 MLA(hi, lo, (*fe)[3], ptr[31 - 10]);
1167 MLA(hi, lo, (*fe)[4], ptr[31 - 8]);
1168 MLA(hi, lo, (*fe)[5], ptr[31 - 6]);
1169 MLA(hi, lo, (*fe)[6], ptr[31 - 4]);
1170 MLA(hi, lo, (*fe)[7], ptr[31 - 2]);
1172 *pcm2-- = SHIFT(MLZ(hi, lo));
1175 ++fo;
1178 ++Dptr;
/* final (sb == 16) contribution; the result is negated (SHIFT(-MLZ)) */
1180 ptr = *Dptr + po;
1181 ML0(hi, lo, (*fo)[0], ptr[ 0]);
1182 MLA(hi, lo, (*fo)[1], ptr[14]);
1183 MLA(hi, lo, (*fo)[2], ptr[12]);
1184 MLA(hi, lo, (*fo)[3], ptr[10]);
1185 MLA(hi, lo, (*fo)[4], ptr[ 8]);
1186 MLA(hi, lo, (*fo)[5], ptr[ 6]);
1187 MLA(hi, lo, (*fo)[6], ptr[ 4]);
1188 MLA(hi, lo, (*fo)[7], ptr[ 2]);
1190 *pcm1 = SHIFT(-MLZ(hi, lo));
1191 pcm1 += 8;
/* advance the polyphase filterbank phase; it cycles through 16 positions */
1193 phase = (phase + 1) % 16;
1197 #endif /* unused */
1200 * NAME: synth->frame()
1201 * DESCRIPTION: perform PCM synthesis of frame subband samples
1203 void mad_synth_frame(struct mad_synth *synth, struct mad_frame const *frame)
1205 unsigned int nch, ns;
1206 #if 0 /* rockbox: unused */
1207 void (*synth_frame)(struct mad_synth *, struct mad_frame const *,
1208 unsigned int, unsigned int);
1209 #endif
1211 nch = MAD_NCHANNELS(&frame->header);
1212 ns = MAD_NSBSAMPLES(&frame->header);
1214 synth->pcm.samplerate = frame->header.samplerate;
1215 synth->pcm.channels = nch;
1216 synth->pcm.length = 32 * ns;
1218 #if 0 /* rockbox: unused */
1219 synth_frame = synth_full;
1221 if (frame->options & MAD_OPTION_HALFSAMPLERATE) {
1222 synth->pcm.samplerate /= 2;
1223 synth->pcm.length /= 2;
1225 synth_frame = synth_half;
1228 synth_frame(synth, frame, nch, ns);
1229 #else
1230 synth_full(synth, frame, nch, ns);
1231 #endif
1233 synth->phase = (synth->phase + ns) % 16;