Submit FS#11461. Major speedup for aac he profile (PP5002 +20%, PP5020 +15%, PP5022...
[kugel-rb.git] / apps / codecs / libfaad / sbr_qmf.c
blob7b70cc6a5e095c31fabec88156f5b25af46214b9
1 /*
2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
3 ** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
4 **
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
9 **
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 ** Any non-GPL usage of this software or parts of this software is strictly
20 ** forbidden.
22 ** Commercial non-GPL licensing of this software is possible.
23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
25 ** $Id$
26 **/
28 #include "common.h"
29 #include "structs.h"
31 #ifdef SBR_DEC
34 #include <stdlib.h>
35 #include <string.h>
36 #include "sbr_dct.h"
37 #include "sbr_qmf.h"
38 #include "sbr_qmf_c.h"
39 #include "sbr_syntax.h"
/* Scaling helpers shared by the analysis and synthesis filterbanks.
 * Fixed point builds scale with arithmetic shifts; floating point builds
 * use the local variable 'scale' declared in the synthesis functions.
 * NOTE: the float branch previously defined FAAD_ANALYSIS_SCALE1 twice
 * (once with the body meant for FAAD_SYNTHESIS_SCALE), leaving
 * FAAD_SYNTHESIS_SCALE undefined in float builds — fixed here. */
#ifdef FIXED_POINT
#define FAAD_SYNTHESIS_SCALE(X) ((X)>>1)
#define FAAD_ANALYSIS_SCALE1(X) ((X)>>4)
#define FAAD_ANALYSIS_SCALE2(X) ((X))
#else
#define FAAD_SYNTHESIS_SCALE(X) ((X)*scale)
#define FAAD_ANALYSIS_SCALE1(X) ((X))
#define FAAD_ANALYSIS_SCALE2(X) (2.*(X))
#endif
51 qmfa_info *qmfa_init(uint8_t channels)
53 qmfa_info *qmfa = (qmfa_info*)faad_malloc(sizeof(qmfa_info));
55 /* x is implemented as double ringbuffer */
56 qmfa->x = (real_t*)faad_malloc(2 * channels * 10 * sizeof(real_t));
57 memset(qmfa->x, 0, 2 * channels * 10 * sizeof(real_t));
59 /* ringbuffer index */
60 qmfa->x_index = 0;
62 qmfa->channels = channels;
64 return qmfa;
67 void qmfa_end(qmfa_info *qmfa)
69 if (qmfa)
71 if (qmfa->x) faad_free(qmfa->x);
72 faad_free(qmfa);
/* 32-band QMF analysis filterbank.
 * Consumes sbr->numTimeSlotsRate * 32 time-domain samples from 'input'
 * and produces 32 subband samples per QMF slot, written to
 * X[l + offset][0..31].  Subbands at or above index kx are zeroed.
 * qmfa->x holds the 320-sample filter history as a double ringbuffer
 * (two mirrored copies) so the state never needs to be shifted. */
void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input,
                         qmf_t X[MAX_NTSRHFG][64], uint8_t offset, uint8_t kx)
{
    ALIGN real_t u[64];
#ifndef SBR_LOW_POWER
    ALIGN real_t real[32];
    ALIGN real_t imag[32];
#else
    ALIGN real_t y[32];
#endif
    qmf_t *pX;
    uint32_t in = 0;        /* read index into 'input' */
    uint32_t l, idx0, idx1;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        int32_t n;

        /* shift input buffer x */
        /* input buffer is not shifted anymore, x is implemented as double ringbuffer */
        //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));

        /* add new samples to input buffer x; each sample is stored in
         * both halves of the double ringbuffer (unrolled by 4) */
        idx0 = qmfa->x_index + 31; idx1 = idx0 + 320;
        for (n = 32 - 1; n >= 0; n-=4)
        {
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
        }

        /* window and summation to create array u: 5-tap polyphase dot
         * product against the qmf_c window (window stride 2, history
         * stride 64 per tap) */
        for (n = 0; n < 64; n++)
        {
            idx0 = qmfa->x_index + n; idx1 = n * 2;
            u[n] = FAAD_ANALYSIS_SCALE1(
                     MUL_F(qmfa->x[idx0      ], qmf_c[idx1          ]) +
                     MUL_F(qmfa->x[idx0 +  64], qmf_c[idx1 + 2 *  64]) +
                     MUL_F(qmfa->x[idx0 + 128], qmf_c[idx1 + 2 * 128]) +
                     MUL_F(qmfa->x[idx0 + 192], qmf_c[idx1 + 2 * 192]) +
                     MUL_F(qmfa->x[idx0 + 256], qmf_c[idx1 + 2 * 256]));
        }

        /* update ringbuffer index (moves backwards, wraps at the front) */
        qmfa->x_index -= 32;
        if (qmfa->x_index < 0)
            qmfa->x_index = (320-32);

        /* calculate 32 subband samples by introducing X */
#ifdef SBR_LOW_POWER
        /* real-valued subbands via a 32-point DCT-III */
        y[0] = u[48];
        for (n = 1; n < 16; n++)
            y[n] = u[n+48] + u[48-n];
        for (n = 16; n < 32; n++)
            y[n] = -u[n-16] + u[48-n];

        DCT3_32_unscaled(u, y);

        for (n = 0; n < 32; n++)
        {
            if (n < kx)
            {
                QMF_RE(X[l + offset][n]) = FAAD_ANALYSIS_SCALE2(u[n]);
            } else {
                QMF_RE(X[l + offset][n]) = 0;
            }
        }
#else /* #ifdef SBR_LOW_POWER */
        /* complex subbands via DCT-IV */
        // Reordering of data moved from DCT_IV to here
        idx0 = 30; idx1 = 63;
        imag[31] = u[ 1]; real[ 0] = u[ 0];
        for (n = 1; n < 31; n+=3)
        {
            imag[idx0--] = u[n+1]; real[n  ] = -u[idx1--];
            imag[idx0--] = u[n+2]; real[n+1] = -u[idx1--];
            imag[idx0--] = u[n+3]; real[n+2] = -u[idx1--];
        }
        imag[ 0] = u[32]; real[31] = -u[33];

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(real, imag);

        // Reordering of data moved from DCT_IV to here
        /* Step 1: Calculate all non-zero pairs */
        pX = X[l + offset];
        for (n = 0; n < kx/2; n++) {
            idx0 = 2*n; idx1 = idx0 + 1;
            QMF_RE(pX[idx0]) = FAAD_ANALYSIS_SCALE2( real[n   ]);
            QMF_IM(pX[idx0]) = FAAD_ANALYSIS_SCALE2( imag[n   ]);
            QMF_RE(pX[idx1]) = FAAD_ANALYSIS_SCALE2(-imag[31-n]);
            QMF_IM(pX[idx1]) = FAAD_ANALYSIS_SCALE2(-real[31-n]);
        }
        /* Step 2: Calculate a single pair with half zero'ed (odd kx) */
        if (kx&1) {
            idx0 = 2*n; idx1 = idx0 + 1;
            QMF_RE(pX[idx0]) = FAAD_ANALYSIS_SCALE2( real[n]);
            QMF_IM(pX[idx0]) = FAAD_ANALYSIS_SCALE2( imag[n]);
            QMF_RE(pX[idx1]) = QMF_IM(pX[idx1]) = 0;
            n++;
        }
        /* Step 3: All other are zero'ed */
        for (; n < 16; n++) {
            idx0 = 2*n; idx1 = idx0 + 1;
            QMF_RE(pX[idx0]) = QMF_IM(pX[idx0]) = 0;
            QMF_RE(pX[idx1]) = QMF_IM(pX[idx1]) = 0;
        }
#endif /* #ifdef SBR_LOW_POWER */
    }
}
/* Pre-twiddle factors for the 32-band HQ synthesis bank, k = 0..31.
 * The values match cos(pi*(2k+1)/256) and -sin(pi*(2k+1)/256), i.e.
 * entry k ~= e^(-j*pi*(2k+1)/256) — NOTE(review): closed form inferred
 * from the numeric values, not stated in this file. */
static const complex_t qmf32_pre_twiddle[] =
{
    { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
    { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
    { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
    { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
    { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
    { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
    { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
    { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
    { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
    { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
    { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
    { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
    { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
    { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
    { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
    { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
    { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
    { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
    { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
    { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
    { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
    { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
    { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
    { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
    { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
    { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
    { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
    { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
    { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
    { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
    { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
    { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
};
225 qmfs_info *qmfs_init(uint8_t channels)
227 qmfs_info *qmfs = (qmfs_info*)faad_malloc(sizeof(qmfs_info));
229 /* v is a double ringbuffer */
230 qmfs->v = (real_t*)faad_malloc(2 * channels * 20 * sizeof(real_t));
231 memset(qmfs->v, 0, 2 * channels * 20 * sizeof(real_t));
233 qmfs->v_index = 0;
235 qmfs->channels = channels;
237 return qmfs;
240 void qmfs_end(qmfs_info *qmfs)
242 if (qmfs)
244 if (qmfs->v) faad_free(qmfs->v);
245 faad_free(qmfs);
249 #ifdef SBR_LOW_POWER
/* 32-band QMF synthesis filterbank (SBR_LOW_POWER / real-valued variant).
 * Converts sbr->numTimeSlotsRate slots of 32 real subband samples from X
 * into sbr->numTimeSlotsRate * 32 time-domain samples in 'output'.
 * Only QMF_RE is read — the low-power decoder carries no imaginary part.
 * qmfs->v is the 640-sample windowing history, stored as a double
 * ringbuffer so it never needs to be shifted. */
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[16];
    ALIGN real_t y[16];
    int16_t n, k, out = 0;   /* out: write index into 'output' */
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffers */
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples: split input into even/odd butterflies
         * (float builds fold a 1/32 scale in here) */
        for (k = 0; k < 16; k++)
        {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_16_unscaled(x, x);
        /* odd n samples */
        DCT4_16(y, y);

        /* interleave the two transforms into v; every value is mirrored
         * into the second half of the double ringbuffer (offset 640) */
        for (n = 8; n < 24; n++)
        {
            qmfs->v[qmfs->v_index + n*2]   = qmfs->v[qmfs->v_index + 640 + n*2]   = x[n-8];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 640 + n*2+1] = y[n-8];
        }
        /* fill the symmetric/antisymmetric extensions of the slot */
        for (n = 0; n < 16; n++)
        {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = qmfs->v[qmfs->v_index + 32-n];
        }
        qmfs->v[qmfs->v_index + 48] = qmfs->v[qmfs->v_index + 640 + 48] = 0;
        for (n = 1; n < 16; n++)
        {
            qmfs->v[qmfs->v_index + 48+n] = qmfs->v[qmfs->v_index + 640 + 48+n] = -qmfs->v[qmfs->v_index + 48-n];
        }

        /* calculate 32 output samples and window: 10-tap dot product
         * against the qmf_c window */
        for (k = 0; k < 32; k++)
        {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[ 64 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2*k]);
        }

        /* update the ringbuffer index (moves backwards, wraps at front) */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0)
            qmfs->v_index = (640-64);
    }
}
/* 64-band QMF synthesis filterbank (SBR_LOW_POWER / real-valued variant).
 * Converts sbr->numTimeSlotsRate slots of 64 real subband samples from X
 * into sbr->numTimeSlotsRate * 64 time-domain samples in 'output'.
 * Only QMF_RE is read — the low-power decoder carries no imaginary part.
 * qmfs->v is the 1280-sample windowing history, stored as a double
 * ringbuffer so it never needs to be shifted. */
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[64];
    ALIGN real_t y[64];
    int16_t n, k, out = 0;   /* out: write index into 'output' */
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffers */
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples: split input into even/odd butterflies
         * (float builds fold a 1/32 scale in here) */
        for (k = 0; k < 32; k++)
        {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_32_unscaled(x, x);
        /* odd n samples */
        DCT4_32(y, y);

        /* interleave the two transforms into v; every value is mirrored
         * into the second half of the double ringbuffer (offset 1280) */
        for (n = 16; n < 48; n++)
        {
            qmfs->v[qmfs->v_index + n*2]   = qmfs->v[qmfs->v_index + 1280 + n*2]   = x[n-16];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 1280 + n*2+1] = y[n-16];
        }
        /* fill the symmetric/antisymmetric extensions of the slot */
        for (n = 0; n < 32; n++)
        {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 1280 + n] = qmfs->v[qmfs->v_index + 64-n];
        }
        qmfs->v[qmfs->v_index + 96] = qmfs->v[qmfs->v_index + 1280 + 96] = 0;
        for (n = 1; n < 32; n++)
        {
            qmfs->v[qmfs->v_index + 96+n] = qmfs->v[qmfs->v_index + 1280 + 96+n] = -qmfs->v[qmfs->v_index + 96-n];
        }

        /* calculate 64 output samples and window: 10-tap dot product
         * against the qmf_c window */
        for (k = 0; k < 64; k++)
        {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[k]) +
                MUL_F(qmfs->v[qmfs->v_index + 192 + k], qmf_c[64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[128 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + 192 + k], qmf_c[128 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[256 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + 192 + k], qmf_c[256 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + k], qmf_c[384 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + 192 + k], qmf_c[384 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + k], qmf_c[512 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + 192 + k], qmf_c[512 + 64 + k]);
        }

        /* update the ringbuffer index (moves backwards, wraps at front) */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280-128);
    }
}
389 #else /* #ifdef SBR_LOW_POWER */
/* Real and imaginary parts of the complex product
 * X[l][k] * qmf32_pre_twiddle[k], used by the 32-band HQ synthesis.
 * Both macros expect 'X' and 'l' to be in scope at the call site.
 * The stray trailing line-continuation backslashes after the closing
 * parenthesis were removed: they spliced the following source line
 * into each macro definition. */
#define FAAD_CMPLX_PRETWIDDLE_SUB(k) \
    (MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - \
     MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k])))

#define FAAD_CMPLX_PRETWIDDLE_ADD(k) \
    (MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + \
     MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k])))
/* 32-band QMF synthesis filterbank (high-quality / complex variant).
 * Converts sbr->numTimeSlotsRate slots of 32 complex subband samples
 * from X into sbr->numTimeSlotsRate * 32 time-domain samples in
 * 'output'.  Each slot is pre-twiddled, transformed with DCT-IV/DST-IV,
 * merged into the 640-sample double ringbuffer qmfs->v, then windowed
 * with qmf_c. */
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x1[32];
    ALIGN real_t x2[32];
#ifndef FIXED_POINT
    real_t scale = 1.f/64.f;   /* consumed by FAAD_SYNTHESIS_SCALE in float builds */
#endif
    int32_t n, k, idx0, idx1, out = 0;   /* out: write index into 'output' */
    uint32_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffer v */
        /* buffer is not shifted, we are using a ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        /* complex pre-twiddle (loop manually unrolled by 4) */
        for (k = 0; k < 32;)
        {
            x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
            x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
            x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
            x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
        }

        /* transform */
        DCT4_32(x1, x1);
        DST4_32(x2, x2);

        /* combine the two transforms into v, writing forwards from the
         * front (idx0) and backwards from the back (idx1) of the slot;
         * every value is mirrored into the second half of the double
         * ringbuffer (offset 640) */
        idx0 = qmfs->v_index;
        idx1 = qmfs->v_index + 63;
        for (n = 0; n < 32; n+=2)
        {
            qmfs->v[idx0] = qmfs->v[idx0 + 640] = -x1[n  ] + x2[n  ]; idx0++;
            qmfs->v[idx1] = qmfs->v[idx1 + 640] =  x1[n  ] + x2[n  ]; idx1--;
            qmfs->v[idx0] = qmfs->v[idx0 + 640] = -x1[n+1] + x2[n+1]; idx0++;
            qmfs->v[idx1] = qmfs->v[idx1 + 640] =  x1[n+1] + x2[n+1]; idx1--;
        }

        /* calculate 32 output samples and window: 10-tap dot product
         * against the qmf_c window (window stride 2) */
        for (k = 0; k < 32; k++)
        {
            idx0 = qmfs->v_index + k; idx1 = 2*k;
            output[out++] = FAAD_SYNTHESIS_SCALE(
                MUL_F(qmfs->v[idx0      ], qmf_c[idx1      ]) +
                MUL_F(qmfs->v[idx0 +  96], qmf_c[idx1 +  64]) +
                MUL_F(qmfs->v[idx0 + 128], qmf_c[idx1 + 128]) +
                MUL_F(qmfs->v[idx0 + 224], qmf_c[idx1 + 192]) +
                MUL_F(qmfs->v[idx0 + 256], qmf_c[idx1 + 256]) +
                MUL_F(qmfs->v[idx0 + 352], qmf_c[idx1 + 320]) +
                MUL_F(qmfs->v[idx0 + 384], qmf_c[idx1 + 384]) +
                MUL_F(qmfs->v[idx0 + 480], qmf_c[idx1 + 448]) +
                MUL_F(qmfs->v[idx0 + 512], qmf_c[idx1 + 512]) +
                MUL_F(qmfs->v[idx0 + 608], qmf_c[idx1 + 576]));
        }

        /* update ringbuffer index (moves backwards, wraps at front) */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0)
            qmfs->v_index = (640 - 64);
    }
}
/* 64-band QMF synthesis filterbank (high-quality / complex variant).
 * Converts sbr->numTimeSlotsRate slots of 64 complex subband samples
 * from X into sbr->numTimeSlotsRate * 64 time-domain samples in
 * 'output'.  Each slot is de-interleaved into two DCT-IV kernels,
 * merged into the 1280-sample double ringbuffer qmfs->v, then
 * windowed with qmf_c. */
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t real1[32];
    ALIGN real_t imag1[32];
    ALIGN real_t real2[32];
    ALIGN real_t imag2[32];
    qmf_t * pX;
    real_t * p_buf_1, * p_buf_3;   /* front half / mirrored half of v */
#ifndef FIXED_POINT
    real_t scale = 1.f/64.f;       /* consumed by FAAD_SYNTHESIS_SCALE in float builds */
#endif
    int32_t n, k, idx0, idx1, out = 0;   /* out: write index into 'output' */
    uint32_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffer v */
        /* buffer is not shifted, we use double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples: de-interleave even/odd subbands into
         * the two transform input pairs */
        pX = X[l];
        for (k = 0; k < 32; k++)
        {
            idx0 = 2*k; idx1 = idx0+1;
            real1[   k] = QMF_RE(pX[idx0]); imag2[   k] = QMF_IM(pX[idx0]);
            imag1[31-k] = QMF_RE(pX[idx1]); real2[31-k] = QMF_IM(pX[idx1]);
        }

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(real1, imag1);
        dct4_kernel(real2, imag2);

        p_buf_1 = qmfs->v + qmfs->v_index;
        p_buf_3 = p_buf_1 + 1280;

        /* combine the two transforms into v, writing forwards from the
         * front (idx0) and backwards from the back (idx1) of the slot;
         * every value is mirrored into the second half of the double
         * ringbuffer via p_buf_3 */
        idx0 = 0; idx1 = 127;
        for (n = 0; n < 32; n++)
        {
            p_buf_1[idx0] = p_buf_3[idx0] = real2[   n] - real1[   n]; idx0++;
            p_buf_1[idx1] = p_buf_3[idx1] = real2[   n] + real1[   n]; idx1--;
            p_buf_1[idx0] = p_buf_3[idx0] = imag2[31-n] + imag1[31-n]; idx0++;
            p_buf_1[idx1] = p_buf_3[idx1] = imag2[31-n] - imag1[31-n]; idx1--;
        }

        p_buf_1 = qmfs->v + qmfs->v_index;

        /* calculate 64 output samples and window: 10-tap dot product
         * against the qmf_c window */
        for (k = 0; k < 64; k++)
        {
            output[out++] = FAAD_SYNTHESIS_SCALE(
                MUL_F(p_buf_1[k         ], qmf_c[k    ]) +
                MUL_F(p_buf_1[k+ 192    ], qmf_c[k+ 64]) +
                MUL_F(p_buf_1[k+ 256    ], qmf_c[k+128]) +
                MUL_F(p_buf_1[k+ 256+192], qmf_c[k+192]) +
                MUL_F(p_buf_1[k+ 512    ], qmf_c[k+256]) +
                MUL_F(p_buf_1[k+ 512+192], qmf_c[k+320]) +
                MUL_F(p_buf_1[k+ 768    ], qmf_c[k+384]) +
                MUL_F(p_buf_1[k+ 768+192], qmf_c[k+448]) +
                MUL_F(p_buf_1[k+1024    ], qmf_c[k+512]) +
                MUL_F(p_buf_1[k+1024+192], qmf_c[k+576]));
        }

        /* update ringbuffer index (moves backwards, wraps at front) */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280 - 128);
    }
}
536 #endif /* #ifdef SBR_LOW_POWER */
538 #endif /* #ifdef SBR_DEC */