2 * libmad - MPEG audio decoder library
3 * Copyright (C) 2000-2004 Underbit Technologies, Inc.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 * DESCRIPTION: initialize synth struct
36 void mad_synth_init(struct mad_synth
*synth
)
38 mad_synth_mute(synth
);
42 synth
->pcm
.samplerate
= 0;
43 synth
->pcm
.channels
= 0;
44 synth
->pcm
.length
= 0;
45 #if defined(CPU_COLDFIRE)
46 /* init the emac unit here, since this function should always be called
47 before using libmad */
48 coldfire_set_macsr(EMAC_FRACTIONAL
| EMAC_SATURATE
| EMAC_ROUND
);
54 * DESCRIPTION: zero all polyphase filterbank values, resetting synthesis
56 void mad_synth_mute(struct mad_synth
*synth
)
58 memset(synth
->filter
, 0, sizeof(synth
->filter
));
61 #if 0 /* dct32 asm implementation is slower on current arm systems */
64 void dct32(mad_fixed_t
const in
[32], unsigned int slot
,
65 mad_fixed_t lo
[16][8], mad_fixed_t hi
[16][8]);
70 * An optional optimization called here the Subband Synthesis Optimization
71 * (SSO) improves the performance of subband synthesis at the expense of
74 * The idea is to simplify 32x32->64-bit multiplication to 32x32->32 such
75 * that extra scaling and rounding are not necessary. This often allows the
76 * compiler to use faster 32-bit multiply-accumulate instructions instead of
77 * explicit 64-bit multiply, shift, and add instructions.
79 * SSO works like this: a full 32x32->64-bit multiply of two mad_fixed_t
80 * values requires the result to be right-shifted 28 bits to be properly
81 * scaled to the same fixed-point format. Right shifts can be applied at any
82 * time to either operand or to the result, so the optimization involves
83 * careful placement of these shifts to minimize the loss of accuracy.
85 * First, a 14-bit shift is applied with rounding at compile-time to the D[]
86 * table of coefficients for the subband synthesis window. This only loses 2
87 * bits of accuracy because the lower 12 bits are always zero. A second
88 * 12-bit shift occurs after the DCT calculation. This loses 12 bits of
89 * accuracy. Finally, a third 2-bit shift occurs just before the sample is
90 * saved in the PCM buffer. 14 + 12 + 2 == 28 bits.
93 /* FPM_DEFAULT without OPT_SSO will actually not work. */
94 # if defined(FPM_DEFAULT) && !defined(OPT_SSO)
98 /* second SSO shift, with rounding */
100 # define SHIFT(x) (((x) + (1L << 11)) >> 12)
102 # define SHIFT(x) (x)
105 /* possible DCT speed optimization */
106 # if defined(FPM_COLDFIRE_EMAC)
107 /* This is a Coldfire version of the OPT_SPEED optimisation below, but in the
108    case of Coldfire it does not lose additional precision. */
111 mad_fixed64hi_t hi; \
112 asm volatile("mac.l %[a], %[b], %%acc0\n\t" \
113 "movclr.l %%acc0, %[hi]" \
115 : [a] "r" ((x)), [b] "r" ((y))); \
118 # elif defined(FPM_ARM)
119 /* This is an ARM version of the OPT_SPEED optimisation below. This
120    implementation will lose 1 bit of accuracy. */
123 mad_fixed64lo_t lo; \
124 mad_fixed64hi_t hi; \
126 "smull %[lo], %[hi], %[a], %[b]\n\t" /* hi = result */ \
127 "mov %[hi], %[hi], lsl #1" /* hi <<= 1 */ \
128 : [lo]"=&r"(lo), [hi]"=&r"(hi) \
129 : [a]"r"(x), [b]"r"(y)); \
132 # elif defined(OPT_SPEED) && defined(MAD_F_MLX)
134 ({ mad_fixed64hi_t hi; \
135 mad_fixed64lo_t lo; \
136 MAD_F_MLX(hi, lo, (x), (y)); \
137 hi << (32 - MAD_F_SCALEBITS - 3); \
140 # define MUL(x, y) mad_f_mul((x), (y>>3))
145 * DESCRIPTION: perform fast in[32]->out[32] DCT
148 void dct32(mad_fixed_t
const in
[32], unsigned int slot
,
149 mad_fixed_t lo
[16][8], mad_fixed_t hi
[16][8])
151 mad_fixed_t t0
, t1
, t2
, t3
, t4
, t5
, t6
, t7
;
152 mad_fixed_t t8
, t9
, t10
, t11
, t12
, t13
, t14
, t15
;
153 mad_fixed_t t16
, t17
, t18
, t19
, t20
, t21
, t22
, t23
;
154 mad_fixed_t t24
, t25
, t26
, t27
, t28
, t29
, t30
, t31
;
155 mad_fixed_t t32
, t33
, t34
, t35
, t36
, t37
, t38
, t39
;
156 mad_fixed_t t40
, t41
, t42
, t43
, t44
, t45
, t46
, t47
;
157 mad_fixed_t t48
, t49
, t50
, t51
, t52
, t53
, t54
, t55
;
158 mad_fixed_t t56
, t57
, t58
, t59
, t60
, t61
, t62
, t63
;
159 mad_fixed_t t64
, t65
, t66
, t67
, t68
, t69
, t70
, t71
;
160 mad_fixed_t t72
, t73
, t74
, t75
, t76
, t77
, t78
, t79
;
161 mad_fixed_t t80
, t81
, t82
, t83
, t84
, t85
, t86
, t87
;
162 mad_fixed_t t88
, t89
, t90
, t91
, t92
, t93
, t94
, t95
;
163 mad_fixed_t t96
, t97
, t98
, t99
, t100
, t101
, t102
, t103
;
164 mad_fixed_t t104
, t105
, t106
, t107
, t108
, t109
, t110
, t111
;
165 mad_fixed_t t112
, t113
, t114
, t115
, t116
, t117
, t118
, t119
;
166 mad_fixed_t t120
, t121
, t122
, t123
, t124
, t125
, t126
, t127
;
167 mad_fixed_t t128
, t129
, t130
, t131
, t132
, t133
, t134
, t135
;
168 mad_fixed_t t136
, t137
, t138
, t139
, t140
, t141
, t142
, t143
;
169 mad_fixed_t t144
, t145
, t146
, t147
, t148
, t149
, t150
, t151
;
170 mad_fixed_t t152
, t153
, t154
, t155
, t156
, t157
, t158
, t159
;
171 mad_fixed_t t160
, t161
, t162
, t163
, t164
, t165
, t166
, t167
;
172 mad_fixed_t t168
, t169
, t170
, t171
, t172
, t173
, t174
, t175
;
175 /* costab[i] = cos(PI / (2 * 32) * i) */
176 #define costab1 MAD_F(0x7fd8878e) /* 0.998795456 */
177 #define costab2 MAD_F(0x7f62368f) /* 0.995184727 */
178 #define costab3 MAD_F(0x7e9d55fc) /* 0.989176510 */
179 #define costab4 MAD_F(0x7d8a5f40) /* 0.980785280 */
180 #define costab5 MAD_F(0x7c29fbee) /* 0.970031253 */
181 #define costab6 MAD_F(0x7a7d055b) /* 0.956940336 */
182 #define costab7 MAD_F(0x78848414) /* 0.941544065 */
183 #define costab8 MAD_F(0x7641af3d) /* 0.923879533 */
184 #define costab9 MAD_F(0x73b5ebd1) /* 0.903989293 */
185 #define costab10 MAD_F(0x70e2cbc6) /* 0.881921264 */
186 #define costab11 MAD_F(0x6dca0d14) /* 0.857728610 */
187 #define costab12 MAD_F(0x6a5d98a4) /* 0.831469612 */
188 #define costab13 MAD_F(0x66cf8120) /* 0.803207531 */
189 #define costab14 MAD_F(0x62f201ac) /* 0.773010453 */
190 #define costab15 MAD_F(0x5ed77c8a) /* 0.740951125 */
191 #define costab16 MAD_F(0x5a82799a) /* 0.707106781 */
192 #define costab17 MAD_F(0x55f5a4d2) /* 0.671558955 */
193 #define costab18 MAD_F(0x5133cc94) /* 0.634393284 */
194 #define costab19 MAD_F(0x4c3fdff4) /* 0.595699304 */
195 #define costab20 MAD_F(0x471cece7) /* 0.555570233 */
196 #define costab21 MAD_F(0x41ce1e65) /* 0.514102744 */
197 #define costab22 MAD_F(0x3c56ba70) /* 0.471396737 */
198 #define costab23 MAD_F(0x36ba2014) /* 0.427555093 */
199 #define costab24 MAD_F(0x30fbc54d) /* 0.382683432 */
200 #define costab25 MAD_F(0x2b1f34eb) /* 0.336889853 */
201 #define costab26 MAD_F(0x25280c5e) /* 0.290284677 */
202 #define costab27 MAD_F(0x1f19f97b) /* 0.242980180 */
203 #define costab28 MAD_F(0x18f8b83c) /* 0.195090322 */
204 #define costab29 MAD_F(0x12c8106f) /* 0.146730474 */
205 #define costab30 MAD_F(0x0c8bd35e) /* 0.098017140 */
206 #define costab31 MAD_F(0x0647d97c) /* 0.049067674 */
208 t0
= in
[0] + in
[31]; t16
= MUL(in
[0] - in
[31], costab1
);
209 t1
= in
[15] + in
[16]; t17
= MUL(in
[15] - in
[16], costab31
);
212 t59
= MUL(t16
- t17
, costab2
);
214 t50
= MUL(t0
- t1
, costab2
);
216 t2
= in
[7] + in
[24]; t18
= MUL(in
[7] - in
[24], costab15
);
217 t3
= in
[8] + in
[23]; t19
= MUL(in
[8] - in
[23], costab17
);
220 t60
= MUL(t18
- t19
, costab30
);
222 t51
= MUL(t2
- t3
, costab30
);
224 t4
= in
[3] + in
[28]; t20
= MUL(in
[3] - in
[28], costab7
);
225 t5
= in
[12] + in
[19]; t21
= MUL(in
[12] - in
[19], costab25
);
228 t61
= MUL(t20
- t21
, costab14
);
230 t52
= MUL(t4
- t5
, costab14
);
232 t6
= in
[4] + in
[27]; t22
= MUL(in
[4] - in
[27], costab9
);
233 t7
= in
[11] + in
[20]; t23
= MUL(in
[11] - in
[20], costab23
);
236 t62
= MUL(t22
- t23
, costab18
);
238 t53
= MUL(t6
- t7
, costab18
);
240 t8
= in
[1] + in
[30]; t24
= MUL(in
[1] - in
[30], costab3
);
241 t9
= in
[14] + in
[17]; t25
= MUL(in
[14] - in
[17], costab29
);
244 t63
= MUL(t24
- t25
, costab6
);
246 t54
= MUL(t8
- t9
, costab6
);
248 t10
= in
[6] + in
[25]; t26
= MUL(in
[6] - in
[25], costab13
);
249 t11
= in
[9] + in
[22]; t27
= MUL(in
[9] - in
[22], costab19
);
252 t64
= MUL(t26
- t27
, costab26
);
254 t55
= MUL(t10
- t11
, costab26
);
256 t12
= in
[2] + in
[29]; t28
= MUL(in
[2] - in
[29], costab5
);
257 t13
= in
[13] + in
[18]; t29
= MUL(in
[13] - in
[18], costab27
);
260 t65
= MUL(t28
- t29
, costab10
);
262 t56
= MUL(t12
- t13
, costab10
);
264 t14
= in
[5] + in
[26]; t30
= MUL(in
[5] - in
[26], costab11
);
265 t15
= in
[10] + in
[21]; t31
= MUL(in
[10] - in
[21], costab21
);
268 t66
= MUL(t30
- t31
, costab22
);
270 t57
= MUL(t14
- t15
, costab22
);
272 t69
= t33
+ t34
; t89
= MUL(t33
- t34
, costab4
);
273 t70
= t35
+ t36
; t90
= MUL(t35
- t36
, costab28
);
274 t71
= t37
+ t38
; t91
= MUL(t37
- t38
, costab12
);
275 t72
= t39
+ t40
; t92
= MUL(t39
- t40
, costab20
);
276 t73
= t41
+ t42
; t94
= MUL(t41
- t42
, costab4
);
277 t74
= t43
+ t44
; t95
= MUL(t43
- t44
, costab28
);
278 t75
= t45
+ t46
; t96
= MUL(t45
- t46
, costab12
);
279 t76
= t47
+ t48
; t97
= MUL(t47
- t48
, costab20
);
281 t78
= t50
+ t51
; t100
= MUL(t50
- t51
, costab4
);
282 t79
= t52
+ t53
; t101
= MUL(t52
- t53
, costab28
);
283 t80
= t54
+ t55
; t102
= MUL(t54
- t55
, costab12
);
284 t81
= t56
+ t57
; t103
= MUL(t56
- t57
, costab20
);
286 t83
= t59
+ t60
; t106
= MUL(t59
- t60
, costab4
);
287 t84
= t61
+ t62
; t107
= MUL(t61
- t62
, costab28
);
288 t85
= t63
+ t64
; t108
= MUL(t63
- t64
, costab12
);
289 t86
= t65
+ t66
; t109
= MUL(t65
- t66
, costab20
);
294 /* 0 */ hi
[15][slot
] = SHIFT(t113
+ t114
);
295 /* 16 */ lo
[ 0][slot
] = SHIFT(MUL(t113
- t114
, costab16
));
302 /* 1 */ hi
[14][slot
] = SHIFT(t32
);
309 /* 2 */ hi
[13][slot
] = SHIFT(t58
);
316 t49
= (t67
* 2) - t32
;
318 /* 3 */ hi
[12][slot
] = SHIFT(t49
);
325 /* 4 */ hi
[11][slot
] = SHIFT(t93
);
332 t68
= (t98
* 2) - t49
;
334 /* 5 */ hi
[10][slot
] = SHIFT(t68
);
341 t82
= (t104
* 2) - t58
;
343 /* 6 */ hi
[ 9][slot
] = SHIFT(t82
);
350 t87
= (t110
* 2) - t67
;
352 t77
= (t87
* 2) - t68
;
354 /* 7 */ hi
[ 8][slot
] = SHIFT(t77
);
356 t141
= MUL(t69
- t70
, costab8
);
357 t142
= MUL(t71
- t72
, costab24
);
360 /* 8 */ hi
[ 7][slot
] = SHIFT(t143
);
361 /* 24 */ lo
[ 8][slot
] =
362 SHIFT((MUL(t141
- t142
, costab16
) * 2) - t143
);
364 t144
= MUL(t73
- t74
, costab8
);
365 t145
= MUL(t75
- t76
, costab24
);
368 t88
= (t146
* 2) - t77
;
370 /* 9 */ hi
[ 6][slot
] = SHIFT(t88
);
372 t148
= MUL(t78
- t79
, costab8
);
373 t149
= MUL(t80
- t81
, costab24
);
376 t105
= (t150
* 2) - t82
;
378 /* 10 */ hi
[ 5][slot
] = SHIFT(t105
);
380 t152
= MUL(t83
- t84
, costab8
);
381 t153
= MUL(t85
- t86
, costab24
);
384 t111
= (t154
* 2) - t87
;
386 t99
= (t111
* 2) - t88
;
388 /* 11 */ hi
[ 4][slot
] = SHIFT(t99
);
390 t157
= MUL(t89
- t90
, costab8
);
391 t158
= MUL(t91
- t92
, costab24
);
394 t127
= (t159
* 2) - t93
;
396 /* 12 */ hi
[ 3][slot
] = SHIFT(t127
);
398 t160
= (MUL(t125
- t126
, costab16
) * 2) - t127
;
400 /* 20 */ lo
[ 4][slot
] = SHIFT(t160
);
401 /* 28 */ lo
[12][slot
] =
402 SHIFT((((MUL(t157
- t158
, costab16
) * 2) - t159
) * 2) - t160
);
404 t161
= MUL(t94
- t95
, costab8
);
405 t162
= MUL(t96
- t97
, costab24
);
408 t130
= (t163
* 2) - t98
;
410 t112
= (t130
* 2) - t99
;
412 /* 13 */ hi
[ 2][slot
] = SHIFT(t112
);
414 t164
= (MUL(t128
- t129
, costab16
) * 2) - t130
;
416 t166
= MUL(t100
- t101
, costab8
);
417 t167
= MUL(t102
- t103
, costab24
);
420 t134
= (t168
* 2) - t104
;
422 t120
= (t134
* 2) - t105
;
424 /* 14 */ hi
[ 1][slot
] = SHIFT(t120
);
426 t135
= (MUL(t118
- t119
, costab16
) * 2) - t120
;
428 /* 18 */ lo
[ 2][slot
] = SHIFT(t135
);
430 t169
= (MUL(t132
- t133
, costab16
) * 2) - t134
;
432 t151
= (t169
* 2) - t135
;
434 /* 22 */ lo
[ 6][slot
] = SHIFT(t151
);
436 t170
= (((MUL(t148
- t149
, costab16
) * 2) - t150
) * 2) - t151
;
438 /* 26 */ lo
[10][slot
] = SHIFT(t170
);
439 /* 30 */ lo
[14][slot
] =
440 SHIFT((((((MUL(t166
- t167
, costab16
) * 2) -
441 t168
) * 2) - t169
) * 2) - t170
);
443 t171
= MUL(t106
- t107
, costab8
);
444 t172
= MUL(t108
- t109
, costab24
);
447 t138
= (t173
* 2) - t110
;
449 t123
= (t138
* 2) - t111
;
451 t139
= (MUL(t121
- t122
, costab16
) * 2) - t123
;
453 t117
= (t123
* 2) - t112
;
455 /* 15 */ hi
[ 0][slot
] = SHIFT(t117
);
457 t124
= (MUL(t115
- t116
, costab16
) * 2) - t117
;
459 /* 17 */ lo
[ 1][slot
] = SHIFT(t124
);
461 t131
= (t139
* 2) - t124
;
463 /* 19 */ lo
[ 3][slot
] = SHIFT(t131
);
465 t140
= (t164
* 2) - t131
;
467 /* 21 */ lo
[ 5][slot
] = SHIFT(t140
);
469 t174
= (MUL(t136
- t137
, costab16
) * 2) - t138
;
471 t155
= (t174
* 2) - t139
;
473 t147
= (t155
* 2) - t140
;
475 /* 23 */ lo
[ 7][slot
] = SHIFT(t147
);
477 t156
= (((MUL(t144
- t145
, costab16
) * 2) - t146
) * 2) - t147
;
479 /* 25 */ lo
[ 9][slot
] = SHIFT(t156
);
481 t175
= (((MUL(t152
- t153
, costab16
) * 2) - t154
) * 2) - t155
;
483 t165
= (t175
* 2) - t156
;
485 /* 27 */ lo
[11][slot
] = SHIFT(t165
);
487 t176
= (((((MUL(t161
- t162
, costab16
) * 2) -
488 t163
) * 2) - t164
) * 2) - t165
;
490 /* 29 */ lo
[13][slot
] = SHIFT(t176
);
491 /* 31 */ lo
[15][slot
] =
492 SHIFT((((((((MUL(t171
- t172
, costab16
) * 2) -
493 t173
) * 2) - t174
) * 2) - t175
) * 2) - t176
);
500 * 49 shifts (not counting SSO)
509 /* third SSO shift and/or D[] optimization preshift */
511 # if defined(OPT_SSO)
512 # if MAD_F_FRACBITS != 28
513 # error "MAD_F_FRACBITS must be 28 to use OPT_SSO"
515 # define ML0(hi, lo, x, y) ((lo) = (x) * (y))
516 # define MLA(hi, lo, x, y) ((lo) += (x) * (y))
517 # define MLN(hi, lo) ((lo) = -(lo))
518 # define MLZ(hi, lo) ((void) (hi), (mad_fixed_t) (lo))
519 # define SHIFT(x) ((x) >> 2)
520 # define PRESHIFT(x) ((MAD_F(x) + (1L << 13)) >> 14)
522 # define ML0(hi, lo, x, y) MAD_F_ML0((hi), (lo), (x), (y))
523 # define MLA(hi, lo, x, y) MAD_F_MLA((hi), (lo), (x), (y))
524 # define MLN(hi, lo) MAD_F_MLN((hi), (lo))
525 # define MLZ(hi, lo) MAD_F_MLZ((hi), (lo))
526 # define SHIFT(x) (x)
527 # if defined(MAD_F_SCALEBITS)
528 # undef MAD_F_SCALEBITS
529 # define MAD_F_SCALEBITS (MAD_F_FRACBITS - 12)
530 # define PRESHIFT(x) (MAD_F(x) >> 12)
532 # define PRESHIFT(x) MAD_F(x)
537 mad_fixed_t
const D
[17][32] ICONST_ATTR MEM_ALIGN_ATTR
= {
542 * NAME: synth->full()
543 * DESCRIPTION: perform full frequency PCM synthesis
545 /* optimised version of synth_full */
546 # ifdef FPM_COLDFIRE_EMAC
548 #define SYNTH_EMAC1(res, f1, pD) \
550 "movem.l (%0), %%d0-%%d7 \n\t" \
551 "move.l (%1), %%a5 \n\t" \
552 "mac.l %%d0, %%a5, 56(%1), %%a5, %%acc0\n\t" \
553 "mac.l %%d1, %%a5, 48(%1), %%a5, %%acc0\n\t" \
554 "mac.l %%d2, %%a5, 40(%1), %%a5, %%acc0\n\t" \
555 "mac.l %%d3, %%a5, 32(%1), %%a5, %%acc0\n\t" \
556 "mac.l %%d4, %%a5, 24(%1), %%a5, %%acc0\n\t" \
557 "mac.l %%d5, %%a5, 16(%1), %%a5, %%acc0\n\t" \
558 "mac.l %%d6, %%a5, 8(%1), %%a5, %%acc0\n\t" \
559 "mac.l %%d7, %%a5, %%acc0\n\t" \
561 : "a" (*f1), "a" (*pD) \
562 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5"); \
564 "movclr.l %%acc0, %0 \n\t" \
567 #define SYNTH_EMAC2(res, f1, f2, pD) \
569 "movem.l (%0), %%d0-%%d7 \n\t" \
570 "move.l 4(%1), %%a5 \n\t" \
571 "msac.l %%d0, %%a5, 60(%1), %%a5, %%acc0\n\t" \
572 "msac.l %%d1, %%a5, 52(%1), %%a5, %%acc0\n\t" \
573 "msac.l %%d2, %%a5, 44(%1), %%a5, %%acc0\n\t" \
574 "msac.l %%d3, %%a5, 36(%1), %%a5, %%acc0\n\t" \
575 "msac.l %%d4, %%a5, 28(%1), %%a5, %%acc0\n\t" \
576 "msac.l %%d5, %%a5, 20(%1), %%a5, %%acc0\n\t" \
577 "msac.l %%d6, %%a5, 12(%1), %%a5, %%acc0\n\t" \
578 "msac.l %%d7, %%a5, (%1), %%a5, %%acc0\n\t" \
579 "movem.l (%2), %%d0-%%d7 \n\t" \
580 "mac.l %%d0, %%a5, 56(%1), %%a5, %%acc0\n\t" \
581 "mac.l %%d1, %%a5, 48(%1), %%a5, %%acc0\n\t" \
582 "mac.l %%d2, %%a5, 40(%1), %%a5, %%acc0\n\t" \
583 "mac.l %%d3, %%a5, 32(%1), %%a5, %%acc0\n\t" \
584 "mac.l %%d4, %%a5, 24(%1), %%a5, %%acc0\n\t" \
585 "mac.l %%d5, %%a5, 16(%1), %%a5, %%acc0\n\t" \
586 "mac.l %%d6, %%a5, 8(%1), %%a5, %%acc0\n\t" \
587 "mac.l %%d7, %%a5, %%acc0\n\t" \
589 : "a" (*f1), "a" (*pD), "a" (*f2) \
590 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
592 "movclr.l %%acc0, %0 \n\t" \
595 #define SYNTH_EMAC_ODD_SBSAMPLE(f1, f2, pD1, pD2, res1, res2) \
597 "movem.l (%0), %%d0-%%d7 \n\t" \
598 "move.l 4(%2), %%a5 \n\t" \
599 "msac.l %%d0, %%a5, 60(%2), %%a5, %%acc0\n\t" \
600 "msac.l %%d1, %%a5, 52(%2), %%a5, %%acc0\n\t" \
601 "msac.l %%d2, %%a5, 44(%2), %%a5, %%acc0\n\t" \
602 "msac.l %%d3, %%a5, 36(%2), %%a5, %%acc0\n\t" \
603 "msac.l %%d4, %%a5, 28(%2), %%a5, %%acc0\n\t" \
604 "msac.l %%d5, %%a5, 20(%2), %%a5, %%acc0\n\t" \
605 "msac.l %%d6, %%a5, 12(%2), %%a5, %%acc0\n\t" \
606 "msac.l %%d7, %%a5, 112(%3), %%a5, %%acc0\n\t" \
607 "mac.l %%d7, %%a5, 104(%3), %%a5, %%acc1\n\t" \
608 "mac.l %%d6, %%a5, 96(%3), %%a5, %%acc1\n\t" \
609 "mac.l %%d5, %%a5, 88(%3), %%a5, %%acc1\n\t" \
610 "mac.l %%d4, %%a5, 80(%3), %%a5, %%acc1\n\t" \
611 "mac.l %%d3, %%a5, 72(%3), %%a5, %%acc1\n\t" \
612 "mac.l %%d2, %%a5, 64(%3), %%a5, %%acc1\n\t" \
613 "mac.l %%d1, %%a5, 120(%3), %%a5, %%acc1\n\t" \
614 "mac.l %%d0, %%a5, 8(%2), %%a5, %%acc1\n\t" \
615 "movem.l (%1), %%d0-%%d7 \n\t" \
616 "mac.l %%d7, %%a5, 16(%2), %%a5, %%acc0\n\t" \
617 "mac.l %%d6, %%a5, 24(%2), %%a5, %%acc0\n\t" \
618 "mac.l %%d5, %%a5, 32(%2), %%a5, %%acc0\n\t" \
619 "mac.l %%d4, %%a5, 40(%2), %%a5, %%acc0\n\t" \
620 "mac.l %%d3, %%a5, 48(%2), %%a5, %%acc0\n\t" \
621 "mac.l %%d2, %%a5, 56(%2), %%a5, %%acc0\n\t" \
622 "mac.l %%d1, %%a5, (%2), %%a5, %%acc0\n\t" \
623 "mac.l %%d0, %%a5, 60(%3), %%a5, %%acc0\n\t" \
624 "mac.l %%d0, %%a5, 68(%3), %%a5, %%acc1\n\t" \
625 "mac.l %%d1, %%a5, 76(%3), %%a5, %%acc1\n\t" \
626 "mac.l %%d2, %%a5, 84(%3), %%a5, %%acc1\n\t" \
627 "mac.l %%d3, %%a5, 92(%3), %%a5, %%acc1\n\t" \
628 "mac.l %%d4, %%a5, 100(%3), %%a5, %%acc1\n\t" \
629 "mac.l %%d5, %%a5, 108(%3), %%a5, %%acc1\n\t" \
630 "mac.l %%d6, %%a5, 116(%3), %%a5, %%acc1\n\t" \
631 "mac.l %%d7, %%a5, %%acc1\n\t" \
633 : "a" (*f1), "a" (*f2), "a" (*pD1), "a" (*pD2) \
634 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
636 "movclr.l %%acc0, %0\n\t" \
637 "movclr.l %%acc1, %1\n\t" \
638 : "=d" (res1), "=d" (res2) );
640 #define SYNTH_EMAC_EVEN_SBSAMPLE(f1, f2, pD1, pD2, res1, res2) \
642 "movem.l (%0), %%d0-%%d7 \n\t" \
643 "move.l (%2), %%a5 \n\t" \
644 "msac.l %%d0, %%a5, 56(%2), %%a5, %%acc0\n\t" \
645 "msac.l %%d1, %%a5, 48(%2), %%a5, %%acc0\n\t" \
646 "msac.l %%d2, %%a5, 40(%2), %%a5, %%acc0\n\t" \
647 "msac.l %%d3, %%a5, 32(%2), %%a5, %%acc0\n\t" \
648 "msac.l %%d4, %%a5, 24(%2), %%a5, %%acc0\n\t" \
649 "msac.l %%d5, %%a5, 16(%2), %%a5, %%acc0\n\t" \
650 "msac.l %%d6, %%a5, 8(%2), %%a5, %%acc0\n\t" \
651 "msac.l %%d7, %%a5, 116(%3), %%a5, %%acc0\n\t" \
652 "mac.l %%d7, %%a5, 108(%3), %%a5, %%acc1\n\t" \
653 "mac.l %%d6, %%a5, 100(%3), %%a5, %%acc1\n\t" \
654 "mac.l %%d5, %%a5, 92(%3), %%a5, %%acc1\n\t" \
655 "mac.l %%d4, %%a5, 84(%3), %%a5, %%acc1\n\t" \
656 "mac.l %%d3, %%a5, 76(%3), %%a5, %%acc1\n\t" \
657 "mac.l %%d2, %%a5, 68(%3), %%a5, %%acc1\n\t" \
658 "mac.l %%d1, %%a5, 60(%3), %%a5, %%acc1\n\t" \
659 "mac.l %%d0, %%a5, 12(%2), %%a5, %%acc1\n\t" \
660 "movem.l (%1), %%d0-%%d7 \n\t" \
661 "mac.l %%d7, %%a5, 20(%2), %%a5, %%acc0\n\t" \
662 "mac.l %%d6, %%a5, 28(%2), %%a5, %%acc0\n\t" \
663 "mac.l %%d5, %%a5, 36(%2), %%a5, %%acc0\n\t" \
664 "mac.l %%d4, %%a5, 44(%2), %%a5, %%acc0\n\t" \
665 "mac.l %%d3, %%a5, 52(%2), %%a5, %%acc0\n\t" \
666 "mac.l %%d2, %%a5, 60(%2), %%a5, %%acc0\n\t" \
667 "mac.l %%d1, %%a5, 4(%2), %%a5, %%acc0\n\t" \
668 "mac.l %%d0, %%a5, 120(%3), %%a5, %%acc0\n\t" \
669 "mac.l %%d0, %%a5, 64(%3), %%a5, %%acc1\n\t" \
670 "mac.l %%d1, %%a5, 72(%3), %%a5, %%acc1\n\t" \
671 "mac.l %%d2, %%a5, 80(%3), %%a5, %%acc1\n\t" \
672 "mac.l %%d3, %%a5, 88(%3), %%a5, %%acc1\n\t" \
673 "mac.l %%d4, %%a5, 96(%3), %%a5, %%acc1\n\t" \
674 "mac.l %%d5, %%a5, 104(%3), %%a5, %%acc1\n\t" \
675 "mac.l %%d6, %%a5, 112(%3), %%a5, %%acc1\n\t" \
676 "mac.l %%d7, %%a5, %%acc1\n\t" \
678 : "a" (*f1), "a" (*f2), "a" (*pD1), "a" (*pD2) \
679 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a5", "memory"); \
681 "movclr.l %%acc0, %0\n\t" \
682 "movclr.l %%acc1, %1\n\t" \
683 : "=d" (res1), "=d" (res2) );
686 void synth_full(struct mad_synth
*synth
, struct mad_frame
const *frame
,
687 unsigned int nch
, unsigned int ns
)
690 unsigned int phase
, ch
, s
, p
;
691 mad_fixed_t
*pcm
, (*filter
)[2][2][16][8];
692 mad_fixed_t (*sbsample
)[36][32];
693 mad_fixed_t (*fe
)[8], (*fx
)[8], (*fo
)[8];
694 mad_fixed_t
const (*D0ptr
)[32];
695 mad_fixed_t
const (*D1ptr
)[32];
696 mad_fixed64hi_t hi0
, hi1
;
698 for (ch
= 0; ch
< nch
; ++ch
) {
699 sbsample
= &(*frame
->sbsample_prev
)[ch
];
700 filter
= &synth
->filter
[ch
];
701 phase
= synth
->phase
;
702 pcm
= synth
->pcm
.samples
[ch
];
704 for (s
= 0; s
< ns
; ++s
) {
705 dct32((*sbsample
)[s
], phase
>> 1,
706 (*filter
)[0][phase
& 1], (*filter
)[1][phase
& 1]);
708 p
= (phase
- 1) & 0xf;
710 /* calculate 32 samples */
711 fe
= &(*filter
)[0][ phase
& 1][0];
712 fx
= &(*filter
)[0][~phase
& 1][0];
713 fo
= &(*filter
)[1][~phase
& 1][0];
715 D0ptr
= (void*)&D
[0][ p
];
716 D1ptr
= (void*)&D
[0][-p
];
720 SYNTH_EMAC2(hi0
, fx
, fe
, D0ptr
);
721 pcm
[0] = hi0
<< 3; /* shift result to libmad's fixed point format */
724 for (sb
= 15; sb
; sb
--, fo
++) {
729 /* D[32 - sb][i] == -D[sb][31 - i] */
730 SYNTH_EMAC_ODD_SBSAMPLE(fo
, fe
, D0ptr
, D1ptr
, hi0
, hi1
);
736 SYNTH_EMAC1(hi0
, fo
, D0ptr
+1);
737 pcm
[0] = -(hi0
<< 3);
741 SYNTH_EMAC2(hi0
, fe
, fx
, D0ptr
);
742 pcm
[0] = -(hi0
<< 3); /* shift result to libmad's fixed point format */
745 for (sb
= 15; sb
; sb
--, fo
++) {
750 /* D[32 - sb][i] == -D[sb][31 - i] */
751 SYNTH_EMAC_EVEN_SBSAMPLE(fo
, fe
, D0ptr
, D1ptr
, hi0
, hi1
);
757 SYNTH_EMAC1(hi0
, fo
, D0ptr
);
758 pcm
[0] = -(hi0
<< 3);
761 phase
= (phase
+ 1) % 16;
766 #elif defined(FPM_ARM)
768 #define PROD_O(hi, lo, f, ptr) \
770 mad_fixed_t *__p = (f); \
771 asm("ldmia %2!, {r0, r1, r2, r3}\n\t" \
772 "ldr r4, [%3, #0]\n\t" \
773 "ldr r12, [%3, #56]\n\t" \
774 "smull %0, %1, r0, r4\n\t" \
775 "ldr r4, [%3, #48]\n\t" \
776 "smlal %0, %1, r1, r12\n\t" \
777 "ldr r12, [%3, #40]\n\t" \
778 "smlal %0, %1, r2, r4\n\t" \
779 "smlal %0, %1, r3, r12\n\t" \
780 "ldmia %2, {r0, r1, r2, r3}\n\t" \
781 "ldr r4, [%3, #32]\n\t" \
782 "ldr r12, [%3, #24]\n\t" \
783 "smlal %0, %1, r0, r4\n\t" \
784 "ldr r4, [%3, #16]\n\t" \
785 "smlal %0, %1, r1, r12\n\t" \
786 "ldr r12, [%3, #8]\n\t" \
787 "smlal %0, %1, r2, r4\n\t" \
788 "smlal %0, %1, r3, r12\n\t" \
789 : "=&r" (lo), "=&r" (hi), "+r" (__p) \
791 : "r0", "r1", "r2", "r3", "r4", "r12"); \
794 #define PROD_A(hi, lo, f, ptr) \
796 mad_fixed_t *__p = (f); \
797 asm("ldmia %2!, {r0, r1, r2, r3}\n\t" \
798 "ldr r4, [%3, #0]\n\t" \
799 "ldr r12, [%3, #56]\n\t" \
800 "smlal %0, %1, r0, r4\n\t" \
801 "ldr r4, [%3, #48]\n\t" \
802 "smlal %0, %1, r1, r12\n\t" \
803 "ldr r12, [%3, #40]\n\t" \
804 "smlal %0, %1, r2, r4\n\t" \
805 "smlal %0, %1, r3, r12\n\t" \
806 "ldmia %2, {r0, r1, r2, r3}\n\t" \
807 "ldr r4, [%3, #32]\n\t" \
808 "ldr r12, [%3, #24]\n\t" \
809 "smlal %0, %1, r0, r4\n\t" \
810 "ldr r4, [%3, #16]\n\t" \
811 "smlal %0, %1, r1, r12\n\t" \
812 "ldr r12, [%3, #8]\n\t" \
813 "smlal %0, %1, r2, r4\n\t" \
814 "smlal %0, %1, r3, r12\n\t" \
815 : "+r" (lo), "+r" (hi), "+r" (__p) \
817 : "r0", "r1", "r2", "r3", "r4", "r12"); \
820 void synth_full_odd_sbsample (mad_fixed_t
*pcm
,
821 mad_fixed_t (*fo
)[8],
822 mad_fixed_t (*fe
)[8],
823 mad_fixed_t
const (*D0ptr
)[32],
824 mad_fixed_t
const (*D1ptr
)[32]);
825 void synth_full_even_sbsample(mad_fixed_t
*pcm
,
826 mad_fixed_t (*fo
)[8],
827 mad_fixed_t (*fe
)[8],
828 mad_fixed_t
const (*D0ptr
)[32],
829 mad_fixed_t
const (*D1ptr
)[32]);
832 void synth_full(struct mad_synth
*synth
, struct mad_frame
const *frame
,
833 unsigned int nch
, unsigned int ns
) ICODE_ATTR_MPA_SYNTH
;
835 void synth_full(struct mad_synth
*synth
, struct mad_frame
const *frame
,
836 unsigned int nch
, unsigned int ns
)
839 unsigned int phase
, ch
, s
;
840 mad_fixed_t
*pcm
, (*filter
)[2][2][16][8];
841 mad_fixed_t (*sbsample
)[36][32];
842 mad_fixed_t (*fe
)[8], (*fx
)[8], (*fo
)[8];
843 mad_fixed_t
const (*D0ptr
)[32], *ptr
;
844 mad_fixed_t
const (*D1ptr
)[32];
848 for (ch
= 0; ch
< nch
; ++ch
) {
849 sbsample
= &(*frame
->sbsample_prev
)[ch
];
850 filter
= &synth
->filter
[ch
];
851 phase
= synth
->phase
;
852 pcm
= synth
->pcm
.samples
[ch
];
854 for (s
= 0; s
< ns
; ++s
) {
855 dct32((*sbsample
)[s
], phase
>> 1,
856 (*filter
)[0][phase
& 1], (*filter
)[1][phase
& 1]);
858 p
= (phase
- 1) & 0xf;
860 /* calculate 32 samples */
861 fe
= &(*filter
)[0][ phase
& 1][0];
862 fx
= &(*filter
)[0][~phase
& 1][0];
863 fo
= &(*filter
)[1][~phase
& 1][0];
865 D0ptr
= (void*)&D
[0][ p
];
866 D1ptr
= (void*)&D
[0][-p
];
871 PROD_O(hi
, lo
, *fx
, ptr
+1);
873 PROD_A(hi
, lo
, *fe
, ptr
);
874 pcm
[0] = SHIFT(MLZ(hi
, lo
));
877 synth_full_odd_sbsample(pcm
, fo
, fe
, D0ptr
, D1ptr
);
884 PROD_O(hi
, lo
, *fo
, ptr
+1);
885 pcm
[0] = SHIFT(-MLZ(hi
, lo
));
890 PROD_O(hi
, lo
, *fx
, ptr
);
892 PROD_A(hi
, lo
, *fe
, ptr
+1);
893 pcm
[0] = SHIFT(MLZ(hi
, lo
));
896 synth_full_even_sbsample(pcm
, fo
, fe
, D0ptr
, D1ptr
);
903 PROD_O(hi
, lo
, *fo
, ptr
);
904 pcm
[0] = SHIFT(-MLZ(hi
, lo
));
908 phase
= (phase
+ 1) % 16;
913 # else /* not FPM_COLDFIRE_EMAC and not FPM_ARM */
915 #define PROD_O(hi, lo, f, ptr, offset) \
916 ML0(hi, lo, (*f)[0], ptr[ 0+offset]); \
917 MLA(hi, lo, (*f)[1], ptr[14+offset]); \
918 MLA(hi, lo, (*f)[2], ptr[12+offset]); \
919 MLA(hi, lo, (*f)[3], ptr[10+offset]); \
920 MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
921 MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
922 MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
923 MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
925 #define PROD_A(hi, lo, f, ptr, offset) \
926 MLA(hi, lo, (*f)[0], ptr[ 0+offset]); \
927 MLA(hi, lo, (*f)[1], ptr[14+offset]); \
928 MLA(hi, lo, (*f)[2], ptr[12+offset]); \
929 MLA(hi, lo, (*f)[3], ptr[10+offset]); \
930 MLA(hi, lo, (*f)[4], ptr[ 8+offset]); \
931 MLA(hi, lo, (*f)[5], ptr[ 6+offset]); \
932 MLA(hi, lo, (*f)[6], ptr[ 4+offset]); \
933 MLA(hi, lo, (*f)[7], ptr[ 2+offset]);
935 #define PROD_SB(hi, lo, ptr, offset, first_idx, last_idx) \
936 ML0(hi, lo, (*fe)[0], ptr[first_idx]); \
937 MLA(hi, lo, (*fe)[1], ptr[16+offset]); \
938 MLA(hi, lo, (*fe)[2], ptr[18+offset]); \
939 MLA(hi, lo, (*fe)[3], ptr[20+offset]); \
940 MLA(hi, lo, (*fe)[4], ptr[22+offset]); \
941 MLA(hi, lo, (*fe)[5], ptr[24+offset]); \
942 MLA(hi, lo, (*fe)[6], ptr[26+offset]); \
943 MLA(hi, lo, (*fe)[7], ptr[28+offset]); \
944 MLA(hi, lo, (*fo)[7], ptr[29-offset]); \
945 MLA(hi, lo, (*fo)[6], ptr[27-offset]); \
946 MLA(hi, lo, (*fo)[5], ptr[25-offset]); \
947 MLA(hi, lo, (*fo)[4], ptr[23-offset]); \
948 MLA(hi, lo, (*fo)[3], ptr[21-offset]); \
949 MLA(hi, lo, (*fo)[2], ptr[19-offset]); \
950 MLA(hi, lo, (*fo)[1], ptr[17-offset]); \
951 MLA(hi, lo, (*fo)[0], ptr[last_idx ]);
954 void synth_full(struct mad_synth
*synth
, struct mad_frame
const *frame
,
955 unsigned int nch
, unsigned int ns
)
958 unsigned int phase
, ch
, s
;
959 mad_fixed_t
*pcm
, (*filter
)[2][2][16][8];
960 mad_fixed_t (*sbsample
)[36][32];
961 mad_fixed_t (*fe
)[8], (*fx
)[8], (*fo
)[8];
962 mad_fixed_t
const (*D0ptr
)[32], *ptr
;
963 mad_fixed_t
const (*D1ptr
)[32];
967 for (ch
= 0; ch
< nch
; ++ch
) {
968 sbsample
= &(*frame
->sbsample_prev
)[ch
];
969 filter
= &synth
->filter
[ch
];
970 phase
= synth
->phase
;
971 pcm
= synth
->pcm
.samples
[ch
];
973 for (s
= 0; s
< ns
; ++s
) {
974 dct32((*sbsample
)[s
], phase
>> 1,
975 (*filter
)[0][phase
& 1], (*filter
)[1][phase
& 1]);
977 p
= (phase
- 1) & 0xf;
979 /* calculate 32 samples */
980 fe
= &(*filter
)[0][ phase
& 1][0];
981 fx
= &(*filter
)[0][~phase
& 1][0];
982 fo
= &(*filter
)[1][~phase
& 1][0];
984 D0ptr
= (void*)&D
[0][ p
];
985 D1ptr
= (void*)&D
[0][-p
];
990 PROD_O(hi
, lo
, fx
, ptr
, 1)
992 PROD_A(hi
, lo
, fe
, ptr
, 0)
993 pcm
[0] = SHIFT(MLZ(hi
, lo
));
996 for (sb
= 15; sb
; sb
--, fo
++)
1002 /* D[32 - sb][i] == -D[sb][31 - i] */
1004 PROD_O(hi
, lo
, fo
, ptr
, 1)
1006 PROD_A(hi
, lo
, fe
, ptr
, 0)
1007 pcm
[-sb
] = SHIFT(MLZ(hi
, lo
));
1010 PROD_SB(hi
, lo
, ptr
, 1, 15, 30)
1011 pcm
[sb
] = SHIFT(MLZ(hi
, lo
));
1015 PROD_O(hi
, lo
, fo
, ptr
, 1)
1016 pcm
[0] = SHIFT(-MLZ(hi
, lo
));
1021 PROD_O(hi
, lo
, fx
, ptr
, 0)
1023 PROD_A(hi
, lo
, fe
, ptr
, 1)
1024 pcm
[0] = SHIFT(MLZ(hi
, lo
));
1027 for (sb
= 15; sb
; sb
--, fo
++)
1033 /* D[32 - sb][i] == -D[sb][31 - i] */
1035 PROD_O(hi
, lo
, fo
, ptr
, 0)
1037 PROD_A(hi
, lo
, fe
, ptr
, 1)
1038 pcm
[-sb
] = SHIFT(MLZ(hi
, lo
));
1041 PROD_SB(hi
, lo
, ptr
, 0, 30, 15)
1042 pcm
[sb
] = SHIFT(MLZ(hi
, lo
));
1046 PROD_O(hi
, lo
, fo
, ptr
, 0)
1047 pcm
[0] = SHIFT(-MLZ(hi
, lo
));
1051 phase
= (phase
+ 1) % 16;
1055 # endif /* FPM_COLDFIRE_EMAC, FPM_ARM */
1057 #if 0 /* rockbox: unused */
1059 * NAME: synth->half()
1060 * DESCRIPTION: perform half frequency PCM synthesis
1063 void synth_half(struct mad_synth
*synth
, struct mad_frame
const *frame
,
1064 unsigned int nch
, unsigned int ns
)
1066 unsigned int phase
, ch
, s
, sb
, pe
, po
;
1067 mad_fixed_t
*pcm1
, *pcm2
, (*filter
)[2][2][16][8];
1068 mad_fixed_t (*sbsample
)[36][32];
1069 register mad_fixed_t (*fe
)[8], (*fx
)[8], (*fo
)[8];
1070 register mad_fixed_t
const (*Dptr
)[32], *ptr
;
1071 register mad_fixed64hi_t hi
;
1072 register mad_fixed64lo_t lo
;
1074 for (ch
= 0; ch
< nch
; ++ch
) {
1075 sbsample
= &(*frame
->sbsample_prev
)[ch
];
1076 filter
= &synth
->filter
[ch
];
1077 phase
= synth
->phase
;
1078 pcm1
= synth
->pcm
.samples
[ch
];
1080 for (s
= 0; s
< ns
; ++s
) {
1081 dct32((*sbsample
)[s
], phase
>> 1,
1082 (*filter
)[0][phase
& 1], (*filter
)[1][phase
& 1]);
1085 po
= ((phase
- 1) & 0xf) | 1;
1087 /* calculate 16 samples */
1089 fe
= &(*filter
)[0][ phase
& 1][0];
1090 fx
= &(*filter
)[0][~phase
& 1][0];
1091 fo
= &(*filter
)[1][~phase
& 1][0];
1096 ML0(hi
, lo
, (*fx
)[0], ptr
[ 0]);
1097 MLA(hi
, lo
, (*fx
)[1], ptr
[14]);
1098 MLA(hi
, lo
, (*fx
)[2], ptr
[12]);
1099 MLA(hi
, lo
, (*fx
)[3], ptr
[10]);
1100 MLA(hi
, lo
, (*fx
)[4], ptr
[ 8]);
1101 MLA(hi
, lo
, (*fx
)[5], ptr
[ 6]);
1102 MLA(hi
, lo
, (*fx
)[6], ptr
[ 4]);
1103 MLA(hi
, lo
, (*fx
)[7], ptr
[ 2]);
1107 MLA(hi
, lo
, (*fe
)[0], ptr
[ 0]);
1108 MLA(hi
, lo
, (*fe
)[1], ptr
[14]);
1109 MLA(hi
, lo
, (*fe
)[2], ptr
[12]);
1110 MLA(hi
, lo
, (*fe
)[3], ptr
[10]);
1111 MLA(hi
, lo
, (*fe
)[4], ptr
[ 8]);
1112 MLA(hi
, lo
, (*fe
)[5], ptr
[ 6]);
1113 MLA(hi
, lo
, (*fe
)[6], ptr
[ 4]);
1114 MLA(hi
, lo
, (*fe
)[7], ptr
[ 2]);
1116 *pcm1
++ = SHIFT(MLZ(hi
, lo
));
1120 for (sb
= 1; sb
< 16; ++sb
) {
1124 /* D[32 - sb][i] == -D[sb][31 - i] */
1128 ML0(hi
, lo
, (*fo
)[0], ptr
[ 0]);
1129 MLA(hi
, lo
, (*fo
)[1], ptr
[14]);
1130 MLA(hi
, lo
, (*fo
)[2], ptr
[12]);
1131 MLA(hi
, lo
, (*fo
)[3], ptr
[10]);
1132 MLA(hi
, lo
, (*fo
)[4], ptr
[ 8]);
1133 MLA(hi
, lo
, (*fo
)[5], ptr
[ 6]);
1134 MLA(hi
, lo
, (*fo
)[6], ptr
[ 4]);
1135 MLA(hi
, lo
, (*fo
)[7], ptr
[ 2]);
1139 MLA(hi
, lo
, (*fe
)[7], ptr
[ 2]);
1140 MLA(hi
, lo
, (*fe
)[6], ptr
[ 4]);
1141 MLA(hi
, lo
, (*fe
)[5], ptr
[ 6]);
1142 MLA(hi
, lo
, (*fe
)[4], ptr
[ 8]);
1143 MLA(hi
, lo
, (*fe
)[3], ptr
[10]);
1144 MLA(hi
, lo
, (*fe
)[2], ptr
[12]);
1145 MLA(hi
, lo
, (*fe
)[1], ptr
[14]);
1146 MLA(hi
, lo
, (*fe
)[0], ptr
[ 0]);
1148 *pcm1
++ = SHIFT(MLZ(hi
, lo
));
1151 ML0(hi
, lo
, (*fo
)[7], ptr
[31 - 2]);
1152 MLA(hi
, lo
, (*fo
)[6], ptr
[31 - 4]);
1153 MLA(hi
, lo
, (*fo
)[5], ptr
[31 - 6]);
1154 MLA(hi
, lo
, (*fo
)[4], ptr
[31 - 8]);
1155 MLA(hi
, lo
, (*fo
)[3], ptr
[31 - 10]);
1156 MLA(hi
, lo
, (*fo
)[2], ptr
[31 - 12]);
1157 MLA(hi
, lo
, (*fo
)[1], ptr
[31 - 14]);
1158 MLA(hi
, lo
, (*fo
)[0], ptr
[31 - 16]);
1161 MLA(hi
, lo
, (*fe
)[0], ptr
[31 - 16]);
1162 MLA(hi
, lo
, (*fe
)[1], ptr
[31 - 14]);
1163 MLA(hi
, lo
, (*fe
)[2], ptr
[31 - 12]);
1164 MLA(hi
, lo
, (*fe
)[3], ptr
[31 - 10]);
1165 MLA(hi
, lo
, (*fe
)[4], ptr
[31 - 8]);
1166 MLA(hi
, lo
, (*fe
)[5], ptr
[31 - 6]);
1167 MLA(hi
, lo
, (*fe
)[6], ptr
[31 - 4]);
1168 MLA(hi
, lo
, (*fe
)[7], ptr
[31 - 2]);
1170 *pcm2
-- = SHIFT(MLZ(hi
, lo
));
1179 ML0(hi
, lo
, (*fo
)[0], ptr
[ 0]);
1180 MLA(hi
, lo
, (*fo
)[1], ptr
[14]);
1181 MLA(hi
, lo
, (*fo
)[2], ptr
[12]);
1182 MLA(hi
, lo
, (*fo
)[3], ptr
[10]);
1183 MLA(hi
, lo
, (*fo
)[4], ptr
[ 8]);
1184 MLA(hi
, lo
, (*fo
)[5], ptr
[ 6]);
1185 MLA(hi
, lo
, (*fo
)[6], ptr
[ 4]);
1186 MLA(hi
, lo
, (*fo
)[7], ptr
[ 2]);
1188 *pcm1
= SHIFT(-MLZ(hi
, lo
));
1191 phase
= (phase
+ 1) % 16;
1198 * NAME: synth->frame()
1199 * DESCRIPTION: perform PCM synthesis of frame subband samples
1201 void mad_synth_frame(struct mad_synth
*synth
, struct mad_frame
const *frame
)
1203 unsigned int nch
, ns
;
1204 #if 0 /* rockbox: unused */
1205 void (*synth_frame
)(struct mad_synth
*, struct mad_frame
const *,
1206 unsigned int, unsigned int);
1209 nch
= MAD_NCHANNELS(&frame
->header
);
1210 ns
= MAD_NSBSAMPLES(&frame
->header
);
1212 synth
->pcm
.samplerate
= frame
->header
.samplerate
;
1213 synth
->pcm
.channels
= nch
;
1214 synth
->pcm
.length
= 32 * ns
;
1216 #if 0 /* rockbox: unused */
1217 synth_frame
= synth_full
;
1219 if (frame
->options
& MAD_OPTION_HALFSAMPLERATE
) {
1220 synth
->pcm
.samplerate
/= 2;
1221 synth
->pcm
.length
/= 2;
1223 synth_frame
= synth_half
;
1226 synth_frame(synth
, frame
, nch
, ns
);
1228 synth_full(synth
, frame
, nch
, ns
);
1231 synth
->phase
= (synth
->phase
+ ns
) % 16;