Refactor libmad's synthesis filter via macro usage. Comment unused special synthesis...
[kugel-rb.git] / apps/codecs/libmad/synth.c
blob 89f971a1ce5016a258f70fedb0708f6f2aefa183
1 /*
2 * libmad - MPEG audio decoder library
3 * Copyright (C) 2000-2004 Underbit Technologies, Inc.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * $Id$
20 */
22 # ifdef HAVE_CONFIG_H
23 # include "config.h"
24 # endif
26 # include "global.h"
28 # include "fixed.h"
29 # include "frame.h"
30 # include "synth.h"
32 /*
33 * NAME: synth->init()
34 * DESCRIPTION: initialize synth struct
35 */
36 void mad_synth_init(struct mad_synth *synth)
37 {
38 mad_synth_mute(synth);
40 synth->phase = 0;
42 synth->pcm.samplerate = 0;
43 synth->pcm.channels = 0;
44 synth->pcm.length = 0;
45 #if defined(CPU_COLDFIRE)
46 /* init the emac unit here, since this function should always be called
47 before using libmad */
48 coldfire_set_macsr(EMAC_FRACTIONAL | EMAC_SATURATE | EMAC_ROUND);
49 #endif
50 }
52 /*
53 * NAME: synth->mute()
54 * DESCRIPTION: zero all polyphase filterbank values, resetting synthesis
55 */
56 void mad_synth_mute(struct mad_synth *synth)
57 {
58 /*
59 unsigned int ch, s, v;
61 for (ch = 0; ch < 2; ++ch) {
62 for (s = 0; s < 16; ++s) {
63 for (v = 0; v < 8; ++v) {
64 synth->filter[ch][0][0][s][v] = synth->filter[ch][0][1][s][v] =
65 synth->filter[ch][1][0][s][v] = synth->filter[ch][1][1][s][v] = 0;
66 }
67 }
68 }
69 */
70 memset(synth->filter, 0, sizeof(synth->filter));
71 }
73 #if 0 /* dct32 asm implementation is slower on current arm systems */
74 /* #ifdef FPM_ARM */
76 void dct32(mad_fixed_t const in[32], unsigned int slot,
77 mad_fixed_t lo[16][8], mad_fixed_t hi[16][8]);
79 #else
81 /*
82 * An optional optimization called here the Subband Synthesis Optimization
83 * (SSO) improves the performance of subband synthesis at the expense of
84 * accuracy.
86 * The idea is to simplify 32x32->64-bit multiplication to 32x32->32 such
87 * that extra scaling and rounding are not necessary. This often allows the
88 * compiler to use faster 32-bit multiply-accumulate instructions instead of
89 * explicit 64-bit multiply, shift, and add instructions.
91 * SSO works like this: a full 32x32->64-bit multiply of two mad_fixed_t
92 * values requires the result to be right-shifted 28 bits to be properly
93 * scaled to the same fixed-point format. Right shifts can be applied at any
94 * time to either operand or to the result, so the optimization involves
95 * careful placement of these shifts to minimize the loss of accuracy.
97 * First, a 14-bit shift is applied with rounding at compile-time to the D[]
98 * table of coefficients for the subband synthesis window. This only loses 2
99 * bits of accuracy because the lower 12 bits are always zero. A second
100 * 12-bit shift occurs after the DCT calculation. This loses 12 bits of
101 * accuracy. Finally, a third 2-bit shift occurs just before the sample is
102 * saved in the PCM buffer. 14 + 12 + 2 == 28 bits.
103 */
105 /* FPM_DEFAULT without OPT_SSO will actually lose accuracy and performance */
107 # if defined(FPM_DEFAULT) && !defined(OPT_SSO)
108 # define OPT_SSO
109 # endif
111 /* second SSO shift, with rounding */
113 # if defined(OPT_SSO)
114 # define SHIFT(x) (((x) + (1L << 11)) >> 12)
115 # else
116 # define SHIFT(x) (x)
117 # endif
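/* Illustrative sketch (not part of the original source), assuming the usual
   MAD_F_FRACBITS == 28 Q28 format: how the three SSO shifts described above
   recover the Q28 scaling of one window product without a 64-bit multiply.
   dct_value and window_coef are hypothetical stand-ins for one dct32() output
   and one D[] coefficient. */
#if 0
static mad_fixed_t sso_one_term(mad_fixed_t dct_value, mad_fixed_t window_coef)
{
  mad_fixed_t d = (window_coef + (1L << 13)) >> 14; /* 1st shift: PRESHIFT of D[] */
  mad_fixed_t v = (dct_value   + (1L << 11)) >> 12; /* 2nd shift: after the DCT */
  return (v * d) >> 2;                              /* 3rd shift: 14 + 12 + 2 == 28 */
}
#endif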
119 /* possible DCT speed optimization */
121 /* This is a Coldfire version of the OPT_SPEED optimisation below, but in the
122 case of Coldfire it doesn't lose any more precision than we would ordinarily
123 lose. */
124 # ifdef FPM_COLDFIRE_EMAC
125 # define OPT_DCTO
126 # define MUL(x, y) \
127 ({ \
128 mad_fixed64hi_t hi; \
129 asm volatile("mac.l %[a], %[b], %%acc0\n\t" \
130 "movclr.l %%acc0, %[hi]" \
131 : [hi] "=r" (hi) \
132 : [a] "r" ((x)), [b] "r" ((y))); \
133 hi; \
134 })
135 # elif defined(OPT_SPEED) && defined(MAD_F_MLX)
136 # define OPT_DCTO
137 # define MUL(x, y) \
138 ({ mad_fixed64hi_t hi; \
139 mad_fixed64lo_t lo; \
140 MAD_F_MLX(hi, lo, (x), (y)); \
141 hi << (32 - MAD_F_SCALEBITS - 3); \
142 })
143 # else
144 # undef OPT_DCTO
145 # define MUL(x, y) mad_f_mul((x), (y))
146 # endif
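/* Illustrative note (not part of the original source): whichever variant is
   selected above, MUL(x, y) is meant to behave like the full-precision Q28
   multiply sketched below; the OPT_DCTO paths differ mainly in how the costab
   operands are pre-scaled so the product still comes out in the same format. */
#if 0
static mad_fixed_t mul_reference(mad_fixed_t x, mad_fixed_t y)
{
  /* assumes MAD_F_FRACBITS == 28 */
  return (mad_fixed_t) (((long long) x * (long long) y) >> 28);
}
#endif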
148 /*
149 * NAME: dct32()
150 * DESCRIPTION: perform fast in[32]->out[32] DCT
151 */
152 static
153 void dct32(mad_fixed_t const in[32], unsigned int slot,
154 mad_fixed_t lo[16][8], mad_fixed_t hi[16][8])
155 {
156 mad_fixed_t t0, t1, t2, t3, t4, t5, t6, t7;
157 mad_fixed_t t8, t9, t10, t11, t12, t13, t14, t15;
158 mad_fixed_t t16, t17, t18, t19, t20, t21, t22, t23;
159 mad_fixed_t t24, t25, t26, t27, t28, t29, t30, t31;
160 mad_fixed_t t32, t33, t34, t35, t36, t37, t38, t39;
161 mad_fixed_t t40, t41, t42, t43, t44, t45, t46, t47;
162 mad_fixed_t t48, t49, t50, t51, t52, t53, t54, t55;
163 mad_fixed_t t56, t57, t58, t59, t60, t61, t62, t63;
164 mad_fixed_t t64, t65, t66, t67, t68, t69, t70, t71;
165 mad_fixed_t t72, t73, t74, t75, t76, t77, t78, t79;
166 mad_fixed_t t80, t81, t82, t83, t84, t85, t86, t87;
167 mad_fixed_t t88, t89, t90, t91, t92, t93, t94, t95;
168 mad_fixed_t t96, t97, t98, t99, t100, t101, t102, t103;
169 mad_fixed_t t104, t105, t106, t107, t108, t109, t110, t111;
170 mad_fixed_t t112, t113, t114, t115, t116, t117, t118, t119;
171 mad_fixed_t t120, t121, t122, t123, t124, t125, t126, t127;
172 mad_fixed_t t128, t129, t130, t131, t132, t133, t134, t135;
173 mad_fixed_t t136, t137, t138, t139, t140, t141, t142, t143;
174 mad_fixed_t t144, t145, t146, t147, t148, t149, t150, t151;
175 mad_fixed_t t152, t153, t154, t155, t156, t157, t158, t159;
176 mad_fixed_t t160, t161, t162, t163, t164, t165, t166, t167;
177 mad_fixed_t t168, t169, t170, t171, t172, t173, t174, t175;
178 mad_fixed_t t176;
180 /* costab[i] = cos(PI / (2 * 32) * i) */
182 # if defined(OPT_DCTO)
183 # define costab1 MAD_F(0x7fd8878e)
184 # define costab2 MAD_F(0x7f62368f)
185 # define costab3 MAD_F(0x7e9d55fc)
186 # define costab4 MAD_F(0x7d8a5f40)
187 # define costab5 MAD_F(0x7c29fbee)
188 # define costab6 MAD_F(0x7a7d055b)
189 # define costab7 MAD_F(0x78848414)
190 # define costab8 MAD_F(0x7641af3d)
191 # define costab9 MAD_F(0x73b5ebd1)
192 # define costab10 MAD_F(0x70e2cbc6)
193 # define costab11 MAD_F(0x6dca0d14)
194 # define costab12 MAD_F(0x6a5d98a4)
195 # define costab13 MAD_F(0x66cf8120)
196 # define costab14 MAD_F(0x62f201ac)
197 # define costab15 MAD_F(0x5ed77c8a)
198 # define costab16 MAD_F(0x5a82799a)
199 # define costab17 MAD_F(0x55f5a4d2)
200 # define costab18 MAD_F(0x5133cc94)
201 # define costab19 MAD_F(0x4c3fdff4)
202 # define costab20 MAD_F(0x471cece7)
203 # define costab21 MAD_F(0x41ce1e65)
204 # define costab22 MAD_F(0x3c56ba70)
205 # define costab23 MAD_F(0x36ba2014)
206 # define costab24 MAD_F(0x30fbc54d)
207 # define costab25 MAD_F(0x2b1f34eb)
208 # define costab26 MAD_F(0x25280c5e)
209 # define costab27 MAD_F(0x1f19f97b)
210 # define costab28 MAD_F(0x18f8b83c)
211 # define costab29 MAD_F(0x12c8106f)
212 # define costab30 MAD_F(0x0c8bd35e)
213 # define costab31 MAD_F(0x0647d97c)
214 # else
215 # define costab1 MAD_F(0x0ffb10f2) /* 0.998795456 */
216 # define costab2 MAD_F(0x0fec46d2) /* 0.995184727 */
217 # define costab3 MAD_F(0x0fd3aac0) /* 0.989176510 */
218 # define costab4 MAD_F(0x0fb14be8) /* 0.980785280 */
219 # define costab5 MAD_F(0x0f853f7e) /* 0.970031253 */
220 # define costab6 MAD_F(0x0f4fa0ab) /* 0.956940336 */
221 # define costab7 MAD_F(0x0f109082) /* 0.941544065 */
222 # define costab8 MAD_F(0x0ec835e8) /* 0.923879533 */
223 # define costab9 MAD_F(0x0e76bd7a) /* 0.903989293 */
224 # define costab10 MAD_F(0x0e1c5979) /* 0.881921264 */
225 # define costab11 MAD_F(0x0db941a3) /* 0.857728610 */
226 # define costab12 MAD_F(0x0d4db315) /* 0.831469612 */
227 # define costab13 MAD_F(0x0cd9f024) /* 0.803207531 */
228 # define costab14 MAD_F(0x0c5e4036) /* 0.773010453 */
229 # define costab15 MAD_F(0x0bdaef91) /* 0.740951125 */
230 # define costab16 MAD_F(0x0b504f33) /* 0.707106781 */
231 # define costab17 MAD_F(0x0abeb49a) /* 0.671558955 */
232 # define costab18 MAD_F(0x0a267993) /* 0.634393284 */
233 # define costab19 MAD_F(0x0987fbfe) /* 0.595699304 */
234 # define costab20 MAD_F(0x08e39d9d) /* 0.555570233 */
235 # define costab21 MAD_F(0x0839c3cd) /* 0.514102744 */
236 # define costab22 MAD_F(0x078ad74e) /* 0.471396737 */
237 # define costab23 MAD_F(0x06d74402) /* 0.427555093 */
238 # define costab24 MAD_F(0x061f78aa) /* 0.382683432 */
239 # define costab25 MAD_F(0x0563e69d) /* 0.336889853 */
240 # define costab26 MAD_F(0x04a5018c) /* 0.290284677 */
241 # define costab27 MAD_F(0x03e33f2f) /* 0.242980180 */
242 # define costab28 MAD_F(0x031f1708) /* 0.195090322 */
243 # define costab29 MAD_F(0x0259020e) /* 0.146730474 */
244 # define costab30 MAD_F(0x01917a5c) /* 0.098017140 */
245 # define costab31 MAD_F(0x00c8fb30) /* 0.049067674 */
246 # endif
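/* Illustrative sketch (not part of the original source): how the table above
   relates to the costab[i] = cos(PI / (2 * 32) * i) comment. The non-DCTO
   constants are MAD_F() Q28 values; the OPT_DCTO table holds the same cosines
   scaled up for the fractional multiplier used by that path. */
#if 0
#include <math.h>
static long costab_q28(int i) /* e.g. costab_q28(1) ~= 0x0ffb10f2 */
{
  return (long) (cos(M_PI / 64.0 * i) * (double) (1L << 28) + 0.5);
}
#endif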
248 t0 = in[0] + in[31]; t16 = MUL(in[0] - in[31], costab1);
249 t1 = in[15] + in[16]; t17 = MUL(in[15] - in[16], costab31);
251 t41 = t16 + t17;
252 t59 = MUL(t16 - t17, costab2);
253 t33 = t0 + t1;
254 t50 = MUL(t0 - t1, costab2);
256 t2 = in[7] + in[24]; t18 = MUL(in[7] - in[24], costab15);
257 t3 = in[8] + in[23]; t19 = MUL(in[8] - in[23], costab17);
259 t42 = t18 + t19;
260 t60 = MUL(t18 - t19, costab30);
261 t34 = t2 + t3;
262 t51 = MUL(t2 - t3, costab30);
264 t4 = in[3] + in[28]; t20 = MUL(in[3] - in[28], costab7);
265 t5 = in[12] + in[19]; t21 = MUL(in[12] - in[19], costab25);
267 t43 = t20 + t21;
268 t61 = MUL(t20 - t21, costab14);
269 t35 = t4 + t5;
270 t52 = MUL(t4 - t5, costab14);
272 t6 = in[4] + in[27]; t22 = MUL(in[4] - in[27], costab9);
273 t7 = in[11] + in[20]; t23 = MUL(in[11] - in[20], costab23);
275 t44 = t22 + t23;
276 t62 = MUL(t22 - t23, costab18);
277 t36 = t6 + t7;
278 t53 = MUL(t6 - t7, costab18);
280 t8 = in[1] + in[30]; t24 = MUL(in[1] - in[30], costab3);
281 t9 = in[14] + in[17]; t25 = MUL(in[14] - in[17], costab29);
283 t45 = t24 + t25;
284 t63 = MUL(t24 - t25, costab6);
285 t37 = t8 + t9;
286 t54 = MUL(t8 - t9, costab6);
288 t10 = in[6] + in[25]; t26 = MUL(in[6] - in[25], costab13);
289 t11 = in[9] + in[22]; t27 = MUL(in[9] - in[22], costab19);
291 t46 = t26 + t27;
292 t64 = MUL(t26 - t27, costab26);
293 t38 = t10 + t11;
294 t55 = MUL(t10 - t11, costab26);
296 t12 = in[2] + in[29]; t28 = MUL(in[2] - in[29], costab5);
297 t13 = in[13] + in[18]; t29 = MUL(in[13] - in[18], costab27);
299 t47 = t28 + t29;
300 t65 = MUL(t28 - t29, costab10);
301 t39 = t12 + t13;
302 t56 = MUL(t12 - t13, costab10);
304 t14 = in[5] + in[26]; t30 = MUL(in[5] - in[26], costab11);
305 t15 = in[10] + in[21]; t31 = MUL(in[10] - in[21], costab21);
307 t48 = t30 + t31;
308 t66 = MUL(t30 - t31, costab22);
309 t40 = t14 + t15;
310 t57 = MUL(t14 - t15, costab22);
312 t69 = t33 + t34; t89 = MUL(t33 - t34, costab4);
313 t70 = t35 + t36; t90 = MUL(t35 - t36, costab28);
314 t71 = t37 + t38; t91 = MUL(t37 - t38, costab12);
315 t72 = t39 + t40; t92 = MUL(t39 - t40, costab20);
316 t73 = t41 + t42; t94 = MUL(t41 - t42, costab4);
317 t74 = t43 + t44; t95 = MUL(t43 - t44, costab28);
318 t75 = t45 + t46; t96 = MUL(t45 - t46, costab12);
319 t76 = t47 + t48; t97 = MUL(t47 - t48, costab20);
321 t78 = t50 + t51; t100 = MUL(t50 - t51, costab4);
322 t79 = t52 + t53; t101 = MUL(t52 - t53, costab28);
323 t80 = t54 + t55; t102 = MUL(t54 - t55, costab12);
324 t81 = t56 + t57; t103 = MUL(t56 - t57, costab20);
326 t83 = t59 + t60; t106 = MUL(t59 - t60, costab4);
327 t84 = t61 + t62; t107 = MUL(t61 - t62, costab28);
328 t85 = t63 + t64; t108 = MUL(t63 - t64, costab12);
329 t86 = t65 + t66; t109 = MUL(t65 - t66, costab20);
331 t113 = t69 + t70;
332 t114 = t71 + t72;
334 /* 0 */ hi[15][slot] = SHIFT(t113 + t114);
335 /* 16 */ lo[ 0][slot] = SHIFT(MUL(t113 - t114, costab16));
337 t115 = t73 + t74;
338 t116 = t75 + t76;
340 t32 = t115 + t116;
342 /* 1 */ hi[14][slot] = SHIFT(t32);
344 t118 = t78 + t79;
345 t119 = t80 + t81;
347 t58 = t118 + t119;
349 /* 2 */ hi[13][slot] = SHIFT(t58);
351 t121 = t83 + t84;
352 t122 = t85 + t86;
354 t67 = t121 + t122;
356 t49 = (t67 * 2) - t32;
358 /* 3 */ hi[12][slot] = SHIFT(t49);
360 t125 = t89 + t90;
361 t126 = t91 + t92;
363 t93 = t125 + t126;
365 /* 4 */ hi[11][slot] = SHIFT(t93);
367 t128 = t94 + t95;
368 t129 = t96 + t97;
370 t98 = t128 + t129;
372 t68 = (t98 * 2) - t49;
374 /* 5 */ hi[10][slot] = SHIFT(t68);
376 t132 = t100 + t101;
377 t133 = t102 + t103;
379 t104 = t132 + t133;
381 t82 = (t104 * 2) - t58;
383 /* 6 */ hi[ 9][slot] = SHIFT(t82);
385 t136 = t106 + t107;
386 t137 = t108 + t109;
388 t110 = t136 + t137;
390 t87 = (t110 * 2) - t67;
392 t77 = (t87 * 2) - t68;
394 /* 7 */ hi[ 8][slot] = SHIFT(t77);
396 t141 = MUL(t69 - t70, costab8);
397 t142 = MUL(t71 - t72, costab24);
398 t143 = t141 + t142;
400 /* 8 */ hi[ 7][slot] = SHIFT(t143);
401 /* 24 */ lo[ 8][slot] =
402 SHIFT((MUL(t141 - t142, costab16) * 2) - t143);
404 t144 = MUL(t73 - t74, costab8);
405 t145 = MUL(t75 - t76, costab24);
406 t146 = t144 + t145;
408 t88 = (t146 * 2) - t77;
410 /* 9 */ hi[ 6][slot] = SHIFT(t88);
412 t148 = MUL(t78 - t79, costab8);
413 t149 = MUL(t80 - t81, costab24);
414 t150 = t148 + t149;
416 t105 = (t150 * 2) - t82;
418 /* 10 */ hi[ 5][slot] = SHIFT(t105);
420 t152 = MUL(t83 - t84, costab8);
421 t153 = MUL(t85 - t86, costab24);
422 t154 = t152 + t153;
424 t111 = (t154 * 2) - t87;
426 t99 = (t111 * 2) - t88;
428 /* 11 */ hi[ 4][slot] = SHIFT(t99);
430 t157 = MUL(t89 - t90, costab8);
431 t158 = MUL(t91 - t92, costab24);
432 t159 = t157 + t158;
434 t127 = (t159 * 2) - t93;
436 /* 12 */ hi[ 3][slot] = SHIFT(t127);
438 t160 = (MUL(t125 - t126, costab16) * 2) - t127;
440 /* 20 */ lo[ 4][slot] = SHIFT(t160);
441 /* 28 */ lo[12][slot] =
442 SHIFT((((MUL(t157 - t158, costab16) * 2) - t159) * 2) - t160);
444 t161 = MUL(t94 - t95, costab8);
445 t162 = MUL(t96 - t97, costab24);
446 t163 = t161 + t162;
448 t130 = (t163 * 2) - t98;
450 t112 = (t130 * 2) - t99;
452 /* 13 */ hi[ 2][slot] = SHIFT(t112);
454 t164 = (MUL(t128 - t129, costab16) * 2) - t130;
456 t166 = MUL(t100 - t101, costab8);
457 t167 = MUL(t102 - t103, costab24);
458 t168 = t166 + t167;
460 t134 = (t168 * 2) - t104;
462 t120 = (t134 * 2) - t105;
464 /* 14 */ hi[ 1][slot] = SHIFT(t120);
466 t135 = (MUL(t118 - t119, costab16) * 2) - t120;
468 /* 18 */ lo[ 2][slot] = SHIFT(t135);
470 t169 = (MUL(t132 - t133, costab16) * 2) - t134;
472 t151 = (t169 * 2) - t135;
474 /* 22 */ lo[ 6][slot] = SHIFT(t151);
476 t170 = (((MUL(t148 - t149, costab16) * 2) - t150) * 2) - t151;
478 /* 26 */ lo[10][slot] = SHIFT(t170);
479 /* 30 */ lo[14][slot] =
480 SHIFT((((((MUL(t166 - t167, costab16) * 2) -
481 t168) * 2) - t169) * 2) - t170);
483 t171 = MUL(t106 - t107, costab8);
484 t172 = MUL(t108 - t109, costab24);
485 t173 = t171 + t172;
487 t138 = (t173 * 2) - t110;
489 t123 = (t138 * 2) - t111;
491 t139 = (MUL(t121 - t122, costab16) * 2) - t123;
493 t117 = (t123 * 2) - t112;
495 /* 15 */ hi[ 0][slot] = SHIFT(t117);
497 t124 = (MUL(t115 - t116, costab16) * 2) - t117;
499 /* 17 */ lo[ 1][slot] = SHIFT(t124);
501 t131 = (t139 * 2) - t124;
503 /* 19 */ lo[ 3][slot] = SHIFT(t131);
505 t140 = (t164 * 2) - t131;
507 /* 21 */ lo[ 5][slot] = SHIFT(t140);
509 t174 = (MUL(t136 - t137, costab16) * 2) - t138;
511 t155 = (t174 * 2) - t139;
513 t147 = (t155 * 2) - t140;
515 /* 23 */ lo[ 7][slot] = SHIFT(t147);
517 t156 = (((MUL(t144 - t145, costab16) * 2) - t146) * 2) - t147;
519 /* 25 */ lo[ 9][slot] = SHIFT(t156);
521 t175 = (((MUL(t152 - t153, costab16) * 2) - t154) * 2) - t155;
523 t165 = (t175 * 2) - t156;
525 /* 27 */ lo[11][slot] = SHIFT(t165);
527 t176 = (((((MUL(t161 - t162, costab16) * 2) -
528 t163) * 2) - t164) * 2) - t165;
530 /* 29 */ lo[13][slot] = SHIFT(t176);
531 /* 31 */ lo[15][slot] =
532 SHIFT((((((((MUL(t171 - t172, costab16) * 2) -
533 t173) * 2) - t174) * 2) - t175) * 2) - t176);
535 /*
536 * Totals:
537 * 80 multiplies
538 * 80 additions
539 * 119 subtractions
540 * 49 shifts (not counting SSO)
541 */
542 }
544 # undef MUL
545 # undef SHIFT
547 #endif
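/* Illustrative sketch (not part of the original source): the slow transform
   that dct32() computes, up to the SSO scaling and the interleaved hi[]/lo[]
   output ordering. Intended only as a mental model; normalization details and
   output permutation are assumptions. */
#if 0
#include <math.h>
static void dct32_reference(double const in[32], double out[32])
{
  int k, n;
  for (k = 0; k < 32; ++k) {
    out[k] = 0.0;
    for (n = 0; n < 32; ++n)
      out[k] += in[n] * cos((2 * n + 1) * k * M_PI / 64.0);
  }
}
#endif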
549 /* third SSO shift and/or D[] optimization preshift */
551 # if defined(OPT_SSO)
552 # if MAD_F_FRACBITS != 28
553 # error "MAD_F_FRACBITS must be 28 to use OPT_SSO"
554 # endif
555 # define ML0(hi, lo, x, y) ((lo) = (x) * (y))
556 # define MLA(hi, lo, x, y) ((lo) += (x) * (y))
557 # define MLN(hi, lo) ((lo) = -(lo))
558 # define MLZ(hi, lo) ((void) (hi), (mad_fixed_t) (lo))
559 # define SHIFT(x) ((x) >> 2)
560 # define PRESHIFT(x) ((MAD_F(x) + (1L << 13)) >> 14)
561 # else
562 # define ML0(hi, lo, x, y) MAD_F_ML0((hi), (lo), (x), (y))
563 # define MLA(hi, lo, x, y) MAD_F_MLA((hi), (lo), (x), (y))
564 # define MLN(hi, lo) MAD_F_MLN((hi), (lo))
565 # define MLZ(hi, lo) MAD_F_MLZ((hi), (lo))
566 # define SHIFT(x) (x)
567 # if defined(MAD_F_SCALEBITS)
568 # undef MAD_F_SCALEBITS
569 # define MAD_F_SCALEBITS (MAD_F_FRACBITS - 12)
570 # define PRESHIFT(x) (MAD_F(x) >> 12)
571 # else
572 # define PRESHIFT(x) MAD_F(x)
573 # endif
574 # endif
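/* Illustrative sketch (not part of the original source): the protocol the
   ML0/MLA/MLZ/SHIFT macros above express, shown as one 8-term dot product.
   dot8_example() is a hypothetical helper, not part of libmad. */
#if 0
static mad_fixed_t dot8_example(mad_fixed_t const *f, mad_fixed_t const *d)
{
  mad_fixed64hi_t hi;
  mad_fixed64lo_t lo;
  int i;

  ML0(hi, lo, f[0], d[0]);        /* start the accumulator with the first product */
  for (i = 1; i < 8; ++i)
    MLA(hi, lo, f[i], d[i]);      /* accumulate the remaining products */

  return SHIFT(MLZ(hi, lo));      /* collapse to mad_fixed_t, apply the final shift */
}
#endif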
576 static
577 mad_fixed_t const D[17][32] ICONST_ATTR = {
578 # include "D.dat"
581 # if defined(ASO_SYNTH)
582 void synth_full(struct mad_synth *, struct mad_frame const *,
583 unsigned int, unsigned int);
584 # else
585 /*
586 * NAME: synth->full()
587 * DESCRIPTION: perform full frequency PCM synthesis
588 */
590 /* optimised version of synth_full */
591 # ifdef FPM_COLDFIRE_EMAC
592 static
593 void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
594 unsigned int nch, unsigned int ns)
595 {
596 int sb;
597 unsigned int phase, ch, s, p;
598 mad_fixed_t *pcm, (*filter)[2][2][16][8];
599 mad_fixed_t (*sbsample)[36][32];
600 mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
601 mad_fixed_t const (*D0ptr)[32];
602 mad_fixed_t const (*D1ptr)[32];
603 mad_fixed64hi_t hi0, hi1;
605 for (ch = 0; ch < nch; ++ch) {
606 sbsample = &(*frame->sbsample_prev)[ch];
607 filter = &synth->filter[ch];
608 phase = synth->phase;
609 pcm = synth->pcm.samples[ch];
611 for (s = 0; s < ns; ++s) {
612 dct32((*sbsample)[s], phase >> 1,
613 (*filter)[0][phase & 1], (*filter)[1][phase & 1]);
615 p = (phase - 1) & 0xf;
617 /* calculate 32 samples */
618 fe = &(*filter)[0][ phase & 1][0];
619 fx = &(*filter)[0][~phase & 1][0];
620 fo = &(*filter)[1][~phase & 1][0];
622 D0ptr = (void*)&D[0][ p];
623 D1ptr = (void*)&D[0][-p];
625 if(s & 1)
627 asm volatile(
628 "movem.l (%1), %%d0-%%d7\n\t"
629 "move.l 4(%2), %%a5\n\t"
630 "msac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t"
631 "msac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t"
632 "msac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t"
633 "msac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t"
634 "msac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t"
635 "msac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t"
636 "msac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t"
637 "msac.l %%d7, %%a5, (%2), %%a5, %%acc0\n\t"
639 "movem.l (%3), %%d0-%%d7\n\t"
640 "mac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t"
641 "mac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t"
642 "mac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t"
643 "mac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t"
644 "mac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t"
645 "mac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t"
646 "mac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t"
647 "mac.l %%d7, %%a5, %%acc0\n\t"
648 "movclr.l %%acc0, %0\n\t"
649 : "=r" (hi0) : "a" (*fx), "a" (*D0ptr), "a" (*fe)
650 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
652 pcm[0] = hi0 << 3; /* shift result to libmad's fixed point format */
653 pcm += 16;
655 for (sb = 15; sb; sb--, fo++) {
656 ++fe;
657 ++D0ptr;
658 ++D1ptr;
660 /* D[32 - sb][i] == -D[sb][31 - i] */
661 asm volatile (
662 "movem.l (%0), %%d0-%%d7\n\t"
663 "move.l 4(%2), %%a5\n\t"
664 "msac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t"
665 "msac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t"
666 "msac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t"
667 "msac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t"
668 "msac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t"
669 "msac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t"
670 "msac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t"
671 "msac.l %%d7, %%a5, 112(%3), %%a5, %%acc0\n\t"
672 "mac.l %%d7, %%a5, 104(%3), %%a5, %%acc1\n\t"
673 "mac.l %%d6, %%a5, 96(%3), %%a5, %%acc1\n\t"
674 "mac.l %%d5, %%a5, 88(%3), %%a5, %%acc1\n\t"
675 "mac.l %%d4, %%a5, 80(%3), %%a5, %%acc1\n\t"
676 "mac.l %%d3, %%a5, 72(%3), %%a5, %%acc1\n\t"
677 "mac.l %%d2, %%a5, 64(%3), %%a5, %%acc1\n\t"
678 "mac.l %%d1, %%a5, 120(%3), %%a5, %%acc1\n\t"
679 "mac.l %%d0, %%a5, 8(%2), %%a5, %%acc1\n\t"
680 "movem.l (%1), %%d0-%%d7\n\t"
681 "mac.l %%d7, %%a5, 16(%2), %%a5, %%acc0\n\t"
682 "mac.l %%d6, %%a5, 24(%2), %%a5, %%acc0\n\t"
683 "mac.l %%d5, %%a5, 32(%2), %%a5, %%acc0\n\t"
684 "mac.l %%d4, %%a5, 40(%2), %%a5, %%acc0\n\t"
685 "mac.l %%d3, %%a5, 48(%2), %%a5, %%acc0\n\t"
686 "mac.l %%d2, %%a5, 56(%2), %%a5, %%acc0\n\t"
687 "mac.l %%d1, %%a5, (%2), %%a5, %%acc0\n\t"
688 "mac.l %%d0, %%a5, 60(%3), %%a5, %%acc0\n\t"
689 "mac.l %%d0, %%a5, 68(%3), %%a5, %%acc1\n\t"
690 "mac.l %%d1, %%a5, 76(%3), %%a5, %%acc1\n\t"
691 "mac.l %%d2, %%a5, 84(%3), %%a5, %%acc1\n\t"
692 "mac.l %%d3, %%a5, 92(%3), %%a5, %%acc1\n\t"
693 "mac.l %%d4, %%a5, 100(%3), %%a5, %%acc1\n\t"
694 "mac.l %%d5, %%a5, 108(%3), %%a5, %%acc1\n\t"
695 "mac.l %%d6, %%a5, 116(%3), %%a5, %%acc1\n\t"
696 "mac.l %%d7, %%a5, %%acc1\n\t"
697 : : "a" (*fo), "a" (*fe), "a" (*D0ptr), "a" (*D1ptr)
698 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
700 asm volatile(
701 "movclr.l %%acc0, %0\n\t"
702 "movclr.l %%acc1, %1\n\t" : "=d" (hi0), "=d" (hi1) );
704 pcm[-sb] = hi0 << 3;
705 pcm[ sb] = hi1 << 3;
706 }
708 ++D0ptr;
709 asm volatile(
710 "movem.l (%1), %%d0-%%d7\n\t"
711 "move.l 4(%2), %%a5\n\t"
712 "mac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t"
713 "mac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t"
714 "mac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t"
715 "mac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t"
716 "mac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t"
717 "mac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t"
718 "mac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t"
719 "mac.l %%d7, %%a5, %%acc0\n\t"
720 "movclr.l %%acc0, %0\n\t"
721 : "=r" (hi0) : "a" (*fo), "a" (*D0ptr)
722 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
724 pcm[0] = -(hi0 << 3);
725 }
726 else
727 {
728 asm volatile(
729 "movem.l (%1), %%d0-%%d7\n\t"
730 "move.l (%2), %%a5\n\t"
731 "msac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t"
732 "msac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t"
733 "msac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t"
734 "msac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t"
735 "msac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t"
736 "msac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t"
737 "msac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t"
738 "msac.l %%d7, %%a5, 4(%2), %%a5, %%acc0\n\t"
740 "movem.l (%3), %%d0-%%d7\n\t"
741 "mac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t"
742 "mac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t"
743 "mac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t"
744 "mac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t"
745 "mac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t"
746 "mac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t"
747 "mac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t"
748 "mac.l %%d7, %%a5, %%acc0\n\t"
749 "movclr.l %%acc0, %0\n\t"
750 : "=r" (hi0) : "a" (*fx), "a" (*D0ptr), "a" (*fe)
751 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
753 pcm[0] = hi0 << 3; /* shift result to libmad's fixed point format */
754 pcm += 16;
756 for (sb = 15; sb; sb--, fo++) {
757 ++fe;
758 ++D0ptr;
759 ++D1ptr;
761 /* D[32 - sb][i] == -D[sb][31 - i] */
762 asm volatile (
763 "movem.l (%0), %%d0-%%d7\n\t"
764 "move.l (%2), %%a5\n\t"
765 "msac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t"
766 "msac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t"
767 "msac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t"
768 "msac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t"
769 "msac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t"
770 "msac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t"
771 "msac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t"
772 "msac.l %%d7, %%a5, 116(%3), %%a5, %%acc0\n\t"
773 "mac.l %%d7, %%a5, 108(%3), %%a5, %%acc1\n\t"
774 "mac.l %%d6, %%a5, 100(%3), %%a5, %%acc1\n\t"
775 "mac.l %%d5, %%a5, 92(%3), %%a5, %%acc1\n\t"
776 "mac.l %%d4, %%a5, 84(%3), %%a5, %%acc1\n\t"
777 "mac.l %%d3, %%a5, 76(%3), %%a5, %%acc1\n\t"
778 "mac.l %%d2, %%a5, 68(%3), %%a5, %%acc1\n\t"
779 "mac.l %%d1, %%a5, 60(%3), %%a5, %%acc1\n\t"
780 "mac.l %%d0, %%a5, 12(%2), %%a5, %%acc1\n\t"
781 "movem.l (%1), %%d0-%%d7\n\t"
782 "mac.l %%d7, %%a5, 20(%2), %%a5, %%acc0\n\t"
783 "mac.l %%d6, %%a5, 28(%2), %%a5, %%acc0\n\t"
784 "mac.l %%d5, %%a5, 36(%2), %%a5, %%acc0\n\t"
785 "mac.l %%d4, %%a5, 44(%2), %%a5, %%acc0\n\t"
786 "mac.l %%d3, %%a5, 52(%2), %%a5, %%acc0\n\t"
787 "mac.l %%d2, %%a5, 60(%2), %%a5, %%acc0\n\t"
788 "mac.l %%d1, %%a5, 4(%2), %%a5, %%acc0\n\t"
789 "mac.l %%d0, %%a5, 120(%3), %%a5, %%acc0\n\t"
790 "mac.l %%d0, %%a5, 64(%3), %%a5, %%acc1\n\t"
791 "mac.l %%d1, %%a5, 72(%3), %%a5, %%acc1\n\t"
792 "mac.l %%d2, %%a5, 80(%3), %%a5, %%acc1\n\t"
793 "mac.l %%d3, %%a5, 88(%3), %%a5, %%acc1\n\t"
794 "mac.l %%d4, %%a5, 96(%3), %%a5, %%acc1\n\t"
795 "mac.l %%d5, %%a5, 104(%3), %%a5, %%acc1\n\t"
796 "mac.l %%d6, %%a5, 112(%3), %%a5, %%acc1\n\t"
797 "mac.l %%d7, %%a5, %%acc1\n\t"
798 : : "a" (*fo), "a" (*fe), "a" (*D0ptr), "a" (*D1ptr)
799 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
801 asm volatile(
802 "movclr.l %%acc0, %0\n\t"
803 "movclr.l %%acc1, %1\n\t" : "=d" (hi0), "=d" (hi1) );
805 pcm[-sb] = hi0 << 3;
806 pcm[ sb] = hi1 << 3;
807 }
809 ++D0ptr;
810 asm volatile(
811 "movem.l (%1), %%d0-%%d7\n\t"
812 "move.l (%2), %%a5\n\t"
813 "mac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t"
814 "mac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t"
815 "mac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t"
816 "mac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t"
817 "mac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t"
818 "mac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t"
819 "mac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t"
820 "mac.l %%d7, %%a5, %%acc0\n\t"
821 "movclr.l %%acc0, %0\n\t"
822 : "=r" (hi0) : "a" (*fo), "a" (*D0ptr)
823 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5");
825 pcm[0] = -(hi0 << 3);
826 }
827 pcm += 16;
828 phase = (phase + 1) % 16;
829 }
830 }
831 }
833 #elif defined(FPM_ARM)
835 #define PROD_O(hi, lo, f, ptr) \
836 ({ \
837 mad_fixed_t *__p = (f); \
838 asm volatile ( \
839 "ldmia %2!, {r0, r1, r2, r3} \n\t" \
840 "ldr r4, [%3, #0] \n\t" \
841 "smull %0, %1, r0, r4 \n\t" \
842 "ldr r4, [%3, #56] \n\t" \
843 "smlal %0, %1, r1, r4 \n\t" \
844 "ldr r4, [%3, #48] \n\t" \
845 "smlal %0, %1, r2, r4 \n\t" \
846 "ldr r4, [%3, #40] \n\t" \
847 "smlal %0, %1, r3, r4 \n\t" \
848 "ldmia %2, {r0, r1, r2, r3} \n\t" \
849 "ldr r4, [%3, #32] \n\t" \
850 "smlal %0, %1, r0, r4 \n\t" \
851 "ldr r4, [%3, #24] \n\t" \
852 "smlal %0, %1, r1, r4 \n\t" \
853 "ldr r4, [%3, #16] \n\t" \
854 "smlal %0, %1, r2, r4 \n\t" \
855 "ldr r4, [%3, #8] \n\t" \
856 "smlal %0, %1, r3, r4 \n\t" \
857 : "=&r" (lo), "=&r" (hi), "+r" (__p) \
858 : "r" (ptr) \
859 : "r0", "r1", "r2", "r3", "r4", "memory"); \
862 #define PROD_A(hi, lo, f, ptr) \
863 ({ \
864 mad_fixed_t *__p = (f); \
865 asm volatile ( \
866 "ldmia %2!, {r0, r1, r2, r3} \n\t" \
867 "ldr r4, [%3, #0] \n\t" \
868 "smlal %0, %1, r0, r4 \n\t" \
869 "ldr r4, [%3, #56] \n\t" \
870 "smlal %0, %1, r1, r4 \n\t" \
871 "ldr r4, [%3, #48] \n\t" \
872 "smlal %0, %1, r2, r4 \n\t" \
873 "ldr r4, [%3, #40] \n\t" \
874 "smlal %0, %1, r3, r4 \n\t" \
875 "ldmia %2, {r0, r1, r2, r3} \n\t" \
876 "ldr r4, [%3, #32] \n\t" \
877 "smlal %0, %1, r0, r4 \n\t" \
878 "ldr r4, [%3, #24] \n\t" \
879 "smlal %0, %1, r1, r4 \n\t" \
880 "ldr r4, [%3, #16] \n\t" \
881 "smlal %0, %1, r2, r4 \n\t" \
882 "ldr r4, [%3, #8] \n\t" \
883 "smlal %0, %1, r3, r4 \n\t" \
884 : "+r" (lo), "+r" (hi), "+r" (__p) \
885 : "r" (ptr) \
886 : "r0", "r1", "r2", "r3", "r4", "memory"); \
889 void synth_full_odd_sbsample (mad_fixed_t *pcm,
890 mad_fixed_t (*fo)[8],
891 mad_fixed_t (*fe)[8],
892 mad_fixed_t const (*D0ptr)[32],
893 mad_fixed_t const (*D1ptr)[32]);
894 void synth_full_even_sbsample(mad_fixed_t *pcm,
895 mad_fixed_t (*fo)[8],
896 mad_fixed_t (*fe)[8],
897 mad_fixed_t const (*D0ptr)[32],
898 mad_fixed_t const (*D1ptr)[32]);
900 static
901 void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
902 unsigned int nch, unsigned int ns) ICODE_ATTR_MPA_SYNTH;
903 static
904 void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
905 unsigned int nch, unsigned int ns)
906 {
907 int p;
908 unsigned int phase, ch, s;
909 mad_fixed_t *pcm, (*filter)[2][2][16][8];
910 mad_fixed_t (*sbsample)[36][32];
911 mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
912 mad_fixed_t const (*D0ptr)[32], *ptr;
913 mad_fixed_t const (*D1ptr)[32];
914 mad_fixed64hi_t hi;
915 mad_fixed64lo_t lo;
917 for (ch = 0; ch < nch; ++ch) {
918 sbsample = &(*frame->sbsample_prev)[ch];
919 filter = &synth->filter[ch];
920 phase = synth->phase;
921 pcm = synth->pcm.samples[ch];
923 for (s = 0; s < ns; ++s) {
924 dct32((*sbsample)[s], phase >> 1,
925 (*filter)[0][phase & 1], (*filter)[1][phase & 1]);
927 p = (phase - 1) & 0xf;
929 /* calculate 32 samples */
930 fe = &(*filter)[0][ phase & 1][0];
931 fx = &(*filter)[0][~phase & 1][0];
932 fo = &(*filter)[1][~phase & 1][0];
934 D0ptr = (void*)&D[0][ p];
935 D1ptr = (void*)&D[0][-p];
937 if(s & 1)
939 ptr = *D0ptr;
940 PROD_O(hi, lo, *fx, ptr+1);
941 MLN(hi, lo);
942 PROD_A(hi, lo, *fe, ptr);
943 pcm[0] = SHIFT(MLZ(hi, lo));
944 pcm += 16;
946 synth_full_odd_sbsample(pcm, fo, fe, D0ptr, D1ptr);
947 D0ptr += 15;
948 D1ptr += 15;
949 fo += 15;
950 fe += 15;
952 ptr = *(D0ptr + 1);
953 PROD_O(hi, lo, *fo, ptr+1);
954 pcm[0] = SHIFT(-MLZ(hi, lo));
955 }
956 else
957 {
958 ptr = *D0ptr;
959 PROD_O(hi, lo, *fx, ptr);
960 MLN(hi, lo);
961 PROD_A(hi, lo, *fe, ptr+1);
962 pcm[0] = SHIFT(MLZ(hi, lo));
963 pcm += 16;
965 synth_full_even_sbsample(pcm, fo, fe, D0ptr, D1ptr);
966 D0ptr += 15;
967 D1ptr += 15;
968 fo += 15;
969 fe += 15;
971 ptr = *(D0ptr + 1);
972 PROD_O(hi, lo, *fo, ptr);
973 pcm[0] = SHIFT(-MLZ(hi, lo));
974 }
976 pcm += 16;
977 phase = (phase + 1) % 16;
978 }
979 }
980 }
982 # else /* not FPM_COLDFIRE_EMAC and not FPM_ARM */
984 #define PROD_O(hi, lo, f, ptr, offset) \
985 ML0(hi, lo, (*f)[0], ptr[ 0+offset]); \
986 MLA(hi, lo, (*f)[1], ptr[14+offset]); \
987 MLA(hi, lo, (*f)[2], ptr[12+offset]); \
988 MLA(hi, lo, (*f)[3], ptr[10+offset]); \
989 MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
990 MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
991 MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
992 MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
994 #define PROD_A(hi, lo, f, ptr, offset) \
995 MLA(hi, lo, (*f)[0], ptr[ 0+offset]); \
996 MLA(hi, lo, (*f)[1], ptr[14+offset]); \
997 MLA(hi, lo, (*f)[2], ptr[12+offset]); \
998 MLA(hi, lo, (*f)[3], ptr[10+offset]); \
999 MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
1000 MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
1001 MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
1002 MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
1004 #define PROD_SB(hi, lo, ptr, offset, first_idx, last_idx) \
1005 ML0(hi, lo, (*fe)[0], ptr[first_idx]); \
1006 MLA(hi, lo, (*fe)[1], ptr[16+offset]); \
1007 MLA(hi, lo, (*fe)[2], ptr[18+offset]); \
1008 MLA(hi, lo, (*fe)[3], ptr[20+offset]); \
1009 MLA(hi, lo, (*fe)[4], ptr[22+offset]); \
1010 MLA(hi, lo, (*fe)[5], ptr[24+offset]); \
1011 MLA(hi, lo, (*fe)[6], ptr[26+offset]); \
1012 MLA(hi, lo, (*fe)[7], ptr[28+offset]); \
1013 MLA(hi, lo, (*fo)[7], ptr[29-offset]); \
1014 MLA(hi, lo, (*fo)[6], ptr[27-offset]); \
1015 MLA(hi, lo, (*fo)[5], ptr[25-offset]); \
1016 MLA(hi, lo, (*fo)[4], ptr[23-offset]); \
1017 MLA(hi, lo, (*fo)[3], ptr[21-offset]); \
1018 MLA(hi, lo, (*fo)[2], ptr[19-offset]); \
1019 MLA(hi, lo, (*fo)[1], ptr[17-offset]); \
1020 MLA(hi, lo, (*fo)[0], ptr[last_idx ]);
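/* Illustrative sketch (not part of the original source): a rough plain-C
   reading of PROD_O(hi, lo, f, ptr, offset) above -- eight multiply-accumulates
   over every other entry of one D[] window row, walking down from 14+offset.
   prod_o_sketch() is a hypothetical helper, not part of libmad. */
#if 0
static mad_fixed_t prod_o_sketch(mad_fixed_t (*f)[8],
                                 mad_fixed_t const *ptr, int offset)
{
  mad_fixed64hi_t hi;
  mad_fixed64lo_t lo;
  int i;

  ML0(hi, lo, (*f)[0], ptr[offset]);
  for (i = 1; i < 8; ++i)
    MLA(hi, lo, (*f)[i], ptr[(8 - i) * 2 + offset]);

  return MLZ(hi, lo);
}
#endif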
1022 static
1023 void synth_full(struct mad_synth *synth, struct mad_frame const *frame,
1024 unsigned int nch, unsigned int ns)
1025 {
1026 int p, sb;
1027 unsigned int phase, ch, s;
1028 mad_fixed_t *pcm, (*filter)[2][2][16][8];
1029 mad_fixed_t (*sbsample)[36][32];
1030 mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
1031 mad_fixed_t const (*D0ptr)[32], *ptr;
1032 mad_fixed_t const (*D1ptr)[32];
1033 mad_fixed64hi_t hi;
1034 mad_fixed64lo_t lo;
1036 for (ch = 0; ch < nch; ++ch) {
1037 sbsample = &(*frame->sbsample_prev)[ch];
1038 filter = &synth->filter[ch];
1039 phase = synth->phase;
1040 pcm = synth->pcm.samples[ch];
1042 for (s = 0; s < ns; ++s) {
1043 dct32((*sbsample)[s], phase >> 1,
1044 (*filter)[0][phase & 1], (*filter)[1][phase & 1]);
1046 p = (phase - 1) & 0xf;
1048 /* calculate 32 samples */
1049 fe = &(*filter)[0][ phase & 1][0];
1050 fx = &(*filter)[0][~phase & 1][0];
1051 fo = &(*filter)[1][~phase & 1][0];
1053 D0ptr = (void*)&D[0][ p];
1054 D1ptr = (void*)&D[0][-p];
1056 if(s & 1)
1058 ptr = *D0ptr;
1059 PROD_O(hi, lo, fx, ptr, 1)
1060 MLN(hi, lo);
1061 PROD_A(hi, lo, fe, ptr, 0)
1062 pcm[0] = SHIFT(MLZ(hi, lo));
1063 pcm += 16;
1065 for (sb = 15; sb; sb--, fo++)
1066 {
1067 ++fe;
1068 ++D0ptr;
1069 ++D1ptr;
1071 /* D[32 - sb][i] == -D[sb][31 - i] */
1072 ptr = *D0ptr;
1073 PROD_O(hi, lo, fo, ptr, 1)
1074 MLN(hi, lo);
1075 PROD_A(hi, lo, fe, ptr, 0)
1076 pcm[-sb] = SHIFT(MLZ(hi, lo));
1078 ptr = *D1ptr;
1079 PROD_SB(hi, lo, ptr, 1, 15, 30)
1080 pcm[sb] = SHIFT(MLZ(hi, lo));
1081 }
1083 ptr = *(D0ptr + 1);
1084 PROD_O(hi, lo, fo, ptr, 1)
1085 pcm[0] = SHIFT(-MLZ(hi, lo));
1086 }
1087 else
1088 {
1089 ptr = *D0ptr;
1090 PROD_O(hi, lo, fx, ptr, 0)
1091 MLN(hi, lo);
1092 PROD_A(hi, lo, fe, ptr, 1)
1093 pcm[0] = SHIFT(MLZ(hi, lo));
1094 pcm += 16;
1096 for (sb = 15; sb; sb--, fo++)
1097 {
1098 ++fe;
1099 ++D0ptr;
1100 ++D1ptr;
1102 /* D[32 - sb][i] == -D[sb][31 - i] */
1103 ptr = *D0ptr;
1104 PROD_O(hi, lo, fo, ptr, 0)
1105 MLN(hi, lo);
1106 PROD_A(hi, lo, fe, ptr, 1)
1107 pcm[-sb] = SHIFT(MLZ(hi, lo));
1109 ptr = *D1ptr;
1110 PROD_SB(hi, lo, ptr, 0, 30, 15)
1111 pcm[sb] = SHIFT(MLZ(hi, lo));
1112 }
1114 ptr = *(D0ptr + 1);
1115 PROD_O(hi, lo, fo, ptr, 0)
1116 pcm[0] = SHIFT(-MLZ(hi, lo));
1117 }
1119 pcm += 16;
1120 phase = (phase + 1) % 16;
1121 }
1122 }
1123 }
1125 # endif
1126 # endif
1128 #if 0 /* rockbox: unused */
1129 /*
1130 * NAME: synth->half()
1131 * DESCRIPTION: perform half frequency PCM synthesis
1132 */
1133 static
1134 void synth_half(struct mad_synth *synth, struct mad_frame const *frame,
1135 unsigned int nch, unsigned int ns)
1136 {
1137 unsigned int phase, ch, s, sb, pe, po;
1138 mad_fixed_t *pcm1, *pcm2, (*filter)[2][2][16][8];
1139 mad_fixed_t (*sbsample)[36][32];
1140 register mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8];
1141 register mad_fixed_t const (*Dptr)[32], *ptr;
1142 register mad_fixed64hi_t hi;
1143 register mad_fixed64lo_t lo;
1145 for (ch = 0; ch < nch; ++ch) {
1146 sbsample = &(*frame->sbsample_prev)[ch];
1147 filter = &synth->filter[ch];
1148 phase = synth->phase;
1149 pcm1 = synth->pcm.samples[ch];
1151 for (s = 0; s < ns; ++s) {
1152 dct32((*sbsample)[s], phase >> 1,
1153 (*filter)[0][phase & 1], (*filter)[1][phase & 1]);
1155 pe = phase & ~1;
1156 po = ((phase - 1) & 0xf) | 1;
1158 /* calculate 16 samples */
1160 fe = &(*filter)[0][ phase & 1][0];
1161 fx = &(*filter)[0][~phase & 1][0];
1162 fo = &(*filter)[1][~phase & 1][0];
1164 Dptr = &D[0];
1166 ptr = *Dptr + po;
1167 ML0(hi, lo, (*fx)[0], ptr[ 0]);
1168 MLA(hi, lo, (*fx)[1], ptr[14]);
1169 MLA(hi, lo, (*fx)[2], ptr[12]);
1170 MLA(hi, lo, (*fx)[3], ptr[10]);
1171 MLA(hi, lo, (*fx)[4], ptr[ 8]);
1172 MLA(hi, lo, (*fx)[5], ptr[ 6]);
1173 MLA(hi, lo, (*fx)[6], ptr[ 4]);
1174 MLA(hi, lo, (*fx)[7], ptr[ 2]);
1175 MLN(hi, lo);
1177 ptr = *Dptr + pe;
1178 MLA(hi, lo, (*fe)[0], ptr[ 0]);
1179 MLA(hi, lo, (*fe)[1], ptr[14]);
1180 MLA(hi, lo, (*fe)[2], ptr[12]);
1181 MLA(hi, lo, (*fe)[3], ptr[10]);
1182 MLA(hi, lo, (*fe)[4], ptr[ 8]);
1183 MLA(hi, lo, (*fe)[5], ptr[ 6]);
1184 MLA(hi, lo, (*fe)[6], ptr[ 4]);
1185 MLA(hi, lo, (*fe)[7], ptr[ 2]);
1187 *pcm1++ = SHIFT(MLZ(hi, lo));
1189 pcm2 = pcm1 + 14;
1191 for (sb = 1; sb < 16; ++sb) {
1192 ++fe;
1193 ++Dptr;
1195 /* D[32 - sb][i] == -D[sb][31 - i] */
1197 if (!(sb & 1)) {
1198 ptr = *Dptr + po;
1199 ML0(hi, lo, (*fo)[0], ptr[ 0]);
1200 MLA(hi, lo, (*fo)[1], ptr[14]);
1201 MLA(hi, lo, (*fo)[2], ptr[12]);
1202 MLA(hi, lo, (*fo)[3], ptr[10]);
1203 MLA(hi, lo, (*fo)[4], ptr[ 8]);
1204 MLA(hi, lo, (*fo)[5], ptr[ 6]);
1205 MLA(hi, lo, (*fo)[6], ptr[ 4]);
1206 MLA(hi, lo, (*fo)[7], ptr[ 2]);
1207 MLN(hi, lo);
1209 ptr = *Dptr + pe;
1210 MLA(hi, lo, (*fe)[7], ptr[ 2]);
1211 MLA(hi, lo, (*fe)[6], ptr[ 4]);
1212 MLA(hi, lo, (*fe)[5], ptr[ 6]);
1213 MLA(hi, lo, (*fe)[4], ptr[ 8]);
1214 MLA(hi, lo, (*fe)[3], ptr[10]);
1215 MLA(hi, lo, (*fe)[2], ptr[12]);
1216 MLA(hi, lo, (*fe)[1], ptr[14]);
1217 MLA(hi, lo, (*fe)[0], ptr[ 0]);
1219 *pcm1++ = SHIFT(MLZ(hi, lo));
1221 ptr = *Dptr - po;
1222 ML0(hi, lo, (*fo)[7], ptr[31 - 2]);
1223 MLA(hi, lo, (*fo)[6], ptr[31 - 4]);
1224 MLA(hi, lo, (*fo)[5], ptr[31 - 6]);
1225 MLA(hi, lo, (*fo)[4], ptr[31 - 8]);
1226 MLA(hi, lo, (*fo)[3], ptr[31 - 10]);
1227 MLA(hi, lo, (*fo)[2], ptr[31 - 12]);
1228 MLA(hi, lo, (*fo)[1], ptr[31 - 14]);
1229 MLA(hi, lo, (*fo)[0], ptr[31 - 16]);
1231 ptr = *Dptr - pe;
1232 MLA(hi, lo, (*fe)[0], ptr[31 - 16]);
1233 MLA(hi, lo, (*fe)[1], ptr[31 - 14]);
1234 MLA(hi, lo, (*fe)[2], ptr[31 - 12]);
1235 MLA(hi, lo, (*fe)[3], ptr[31 - 10]);
1236 MLA(hi, lo, (*fe)[4], ptr[31 - 8]);
1237 MLA(hi, lo, (*fe)[5], ptr[31 - 6]);
1238 MLA(hi, lo, (*fe)[6], ptr[31 - 4]);
1239 MLA(hi, lo, (*fe)[7], ptr[31 - 2]);
1241 *pcm2-- = SHIFT(MLZ(hi, lo));
1242 }
1244 ++fo;
1245 }
1247 ++Dptr;
1249 ptr = *Dptr + po;
1250 ML0(hi, lo, (*fo)[0], ptr[ 0]);
1251 MLA(hi, lo, (*fo)[1], ptr[14]);
1252 MLA(hi, lo, (*fo)[2], ptr[12]);
1253 MLA(hi, lo, (*fo)[3], ptr[10]);
1254 MLA(hi, lo, (*fo)[4], ptr[ 8]);
1255 MLA(hi, lo, (*fo)[5], ptr[ 6]);
1256 MLA(hi, lo, (*fo)[6], ptr[ 4]);
1257 MLA(hi, lo, (*fo)[7], ptr[ 2]);
1259 *pcm1 = SHIFT(-MLZ(hi, lo));
1260 pcm1 += 8;
1262 phase = (phase + 1) % 16;
1263 }
1264 }
1265 }
1266 #endif /* unused */
1268 /*
1269 * NAME: synth->frame()
1270 * DESCRIPTION: perform PCM synthesis of frame subband samples
1271 */
1272 void mad_synth_frame(struct mad_synth *synth, struct mad_frame const *frame)
1273 {
1274 unsigned int nch, ns;
1275 void (*synth_frame)(struct mad_synth *, struct mad_frame const *,
1276 unsigned int, unsigned int);
1278 nch = MAD_NCHANNELS(&frame->header);
1279 ns = MAD_NSBSAMPLES(&frame->header);
1281 synth->pcm.samplerate = frame->header.samplerate;
1282 synth->pcm.channels = nch;
1283 synth->pcm.length = 32 * ns;
1285 synth_frame = synth_full;
1287 #if 0 /* rockbox: unused */
1288 if (frame->options & MAD_OPTION_HALFSAMPLERATE) {
1289 synth->pcm.samplerate /= 2;
1290 synth->pcm.length /= 2;
1292 synth_frame = synth_half;
1293 }
1294 #endif
1296 synth_frame(synth, frame, nch, ns);
1298 synth->phase = (synth->phase + ns) % 16;
1299 }
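/* Illustrative sketch (not part of the original source): how a caller
   typically drives the synthesis above, assuming the usual libmad decode
   loop; buf/len and decode_example() are hypothetical. A real loop would
   also handle MAD_RECOVERABLE() stream errors instead of stopping. */
#if 0
# include "stream.h"
static void decode_example(unsigned char const *buf, unsigned long len)
{
  struct mad_stream stream;
  struct mad_frame frame;
  struct mad_synth synth;

  mad_stream_init(&stream);
  mad_frame_init(&frame);
  mad_synth_init(&synth);              /* also sets up the EMAC unit on Coldfire */

  mad_stream_buffer(&stream, buf, len);

  while (mad_frame_decode(&frame, &stream) == 0) {
    mad_synth_frame(&synth, &frame);   /* fills synth.pcm with 32 * ns samples per channel */
    /* consume synth.pcm.samples[0..synth.pcm.channels-1][0..synth.pcm.length-1] here */
  }

  mad_synth_finish(&synth);
  mad_frame_finish(&frame);
  mad_stream_finish(&stream);
}
#endif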