2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
3 ** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 ** Any non-GPL usage of this software or parts of this software is strictly
22 ** Commercial non-GPL licensing of this software is possible.
23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
38 #include "sbr_qmf_c.h"
39 #include "sbr_syntax.h"
/* Scaling helpers for the QMF filterbank.
 * FIXED_POINT builds scale by bit-shifting; floating-point builds scale by
 * multiplication (`scale` is a local variable declared by the synthesis
 * routines that expand FAAD_SYNTHESIS_SCALE).
 * NOTE(review): the original #if structure was lost in the mangled source,
 * leaving three conflicting definitions of FAAD_ANALYSIS_SCALE1 (a
 * redefinition constraint violation, C11 6.10.3).  Guards restored so each
 * macro is defined exactly once per configuration — confirm the float-build
 * assignment of "((X)*scale)" to FAAD_SYNTHESIS_SCALE against upstream. */
#ifdef FIXED_POINT
#define FAAD_SYNTHESIS_SCALE(X) ((X)>>1)
#define FAAD_ANALYSIS_SCALE1(X) ((X)>>4)
#define FAAD_ANALYSIS_SCALE2(X) ((X))
#else
#define FAAD_SYNTHESIS_SCALE(X) ((X)*scale)
#define FAAD_ANALYSIS_SCALE1(X) ((X))
#define FAAD_ANALYSIS_SCALE2(X) (2.*(X))
#endif
51 qmfa_info
*qmfa_init(uint8_t channels
)
53 qmfa_info
*qmfa
= (qmfa_info
*)faad_malloc(sizeof(qmfa_info
));
55 /* x is implemented as double ringbuffer */
56 qmfa
->x
= (real_t
*)faad_malloc(2 * channels
* 10 * sizeof(real_t
));
57 memset(qmfa
->x
, 0, 2 * channels
* 10 * sizeof(real_t
));
59 /* ringbuffer index */
62 qmfa
->channels
= channels
;
67 void qmfa_end(qmfa_info
*qmfa
)
71 if (qmfa
->x
) faad_free(qmfa
->x
);
76 void sbr_qmf_analysis_32(sbr_info
*sbr
, qmfa_info
*qmfa
, const real_t
*input
,
77 qmf_t X
[MAX_NTSRHFG
][64], uint8_t offset
, uint8_t kx
)
81 ALIGN real_t real
[32];
82 ALIGN real_t imag
[32];
88 uint32_t l
, idx0
, idx1
;
91 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
95 /* shift input buffer x */
96 /* input buffer is not shifted anymore, x is implemented as double ringbuffer */
97 //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));
99 /* add new samples to input buffer x */
100 idx0
= qmfa
->x_index
+ 31; idx1
= idx0
+ 320;
101 for (n
= 32 - 1; n
>= 0; n
-=4)
103 qmfa
->x
[idx0
--] = qmfa
->x
[idx1
--] = (input
[in
++]);
104 qmfa
->x
[idx0
--] = qmfa
->x
[idx1
--] = (input
[in
++]);
105 qmfa
->x
[idx0
--] = qmfa
->x
[idx1
--] = (input
[in
++]);
106 qmfa
->x
[idx0
--] = qmfa
->x
[idx1
--] = (input
[in
++]);
109 /* window and summation to create array u */
110 for (n
= 0; n
< 64; n
++)
112 idx0
= qmfa
->x_index
+ n
; idx1
= n
* 2 * 5;
113 u
[n
] = FAAD_ANALYSIS_SCALE1(
114 MUL_F(qmfa
->x
[idx0
], qmf_c
[idx1
]) +
115 MUL_F(qmfa
->x
[idx0
+ 64], qmf_c
[idx1
+ 2]) +
116 MUL_F(qmfa
->x
[idx0
+ 128], qmf_c
[idx1
+ 4]) +
117 MUL_F(qmfa
->x
[idx0
+ 192], qmf_c
[idx1
+ 6]) +
118 MUL_F(qmfa
->x
[idx0
+ 256], qmf_c
[idx1
+ 8]));
121 /* update ringbuffer index */
123 if (qmfa
->x_index
< 0)
124 qmfa
->x_index
= (320-32);
126 /* calculate 32 subband samples by introducing X */
129 for (n
= 1; n
< 16; n
++)
130 y
[n
] = u
[n
+48] + u
[48-n
];
131 for (n
= 16; n
< 32; n
++)
132 y
[n
] = -u
[n
-16] + u
[48-n
];
134 DCT3_32_unscaled(u
, y
);
136 for (n
= 0; n
< 32; n
++)
140 QMF_RE(X
[l
+ offset
][n
]) = FAAD_ANALYSIS_SCALE2(u
[n
]);
142 QMF_RE(X
[l
+ offset
][n
]) = 0;
145 #else /* #ifdef SBR_LOW_POWER */
147 // Reordering of data moved from DCT_IV to here
148 idx0
= 30; idx1
= 63;
149 imag
[31] = u
[ 1]; real
[ 0] = u
[ 0];
150 for (n
= 1; n
< 31; n
+=3)
152 imag
[idx0
--] = u
[n
+1]; real
[n
] = -u
[idx1
--];
153 imag
[idx0
--] = u
[n
+2]; real
[n
+1] = -u
[idx1
--];
154 imag
[idx0
--] = u
[n
+3]; real
[n
+2] = -u
[idx1
--];
156 imag
[ 0] = u
[32]; real
[31] = -u
[33];
158 // dct4_kernel is DCT_IV without reordering which is done before and after FFT
159 dct4_kernel(real
, imag
);
161 // Reordering of data moved from DCT_IV to here
162 /* Step 1: Calculate all non-zero pairs */
164 for (n
= 0; n
< kx
/2; n
++) {
165 idx0
= 2*n
; idx1
= idx0
+ 1;
166 QMF_RE(pX
[idx0
]) = FAAD_ANALYSIS_SCALE2( real
[n
]);
167 QMF_IM(pX
[idx0
]) = FAAD_ANALYSIS_SCALE2( imag
[n
]);
168 QMF_RE(pX
[idx1
]) = FAAD_ANALYSIS_SCALE2(-imag
[31-n
]);
169 QMF_IM(pX
[idx1
]) = FAAD_ANALYSIS_SCALE2(-real
[31-n
]);
171 /* Step 2: Calculate a single pair with half zero'ed */
173 idx0
= 2*n
; idx1
= idx0
+ 1;
174 QMF_RE(pX
[idx0
]) = FAAD_ANALYSIS_SCALE2( real
[n
]);
175 QMF_IM(pX
[idx0
]) = FAAD_ANALYSIS_SCALE2( imag
[n
]);
176 QMF_RE(pX
[idx1
]) = QMF_IM(pX
[idx1
]) = 0;
179 /* Step 3: All other are zero'ed */
180 for (; n
< 16; n
++) {
181 idx0
= 2*n
; idx1
= idx0
+ 1;
182 QMF_RE(pX
[idx0
]) = QMF_IM(pX
[idx0
]) = 0;
183 QMF_RE(pX
[idx1
]) = QMF_IM(pX
[idx1
]) = 0;
185 #endif /* #ifdef SBR_LOW_POWER */
189 static const complex_t qmf32_pre_twiddle
[] =
191 { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
192 { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
193 { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
194 { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
195 { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
196 { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
197 { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
198 { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
199 { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
200 { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
201 { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
202 { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
203 { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
204 { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
205 { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
206 { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
207 { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
208 { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
209 { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
210 { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
211 { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
212 { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
213 { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
214 { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
215 { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
216 { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
217 { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
218 { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
219 { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
220 { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
221 { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
222 { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
225 qmfs_info
*qmfs_init(uint8_t channels
)
227 qmfs_info
*qmfs
= (qmfs_info
*)faad_malloc(sizeof(qmfs_info
));
229 /* v is a double ringbuffer */
230 qmfs
->v
= (real_t
*)faad_malloc(2 * channels
* 20 * sizeof(real_t
));
231 memset(qmfs
->v
, 0, 2 * channels
* 20 * sizeof(real_t
));
235 qmfs
->channels
= channels
;
240 void qmfs_end(qmfs_info
*qmfs
)
244 if (qmfs
->v
) faad_free(qmfs
->v
);
251 void sbr_qmf_synthesis_32(sbr_info
*sbr
, qmfs_info
*qmfs
, qmf_t X
[MAX_NTSRHFG
][64],
256 int16_t n
, k
, out
= 0;
259 /* qmf subsample l */
260 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
263 /* we are not shifting v, it is a double ringbuffer */
264 //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));
266 /* calculate 64 samples */
267 for (k
= 0; k
< 16; k
++)
270 y
[k
] = (QMF_RE(X
[l
][k
]) - QMF_RE(X
[l
][31 - k
]));
271 x
[k
] = (QMF_RE(X
[l
][k
]) + QMF_RE(X
[l
][31 - k
]));
273 y
[k
] = (QMF_RE(X
[l
][k
]) - QMF_RE(X
[l
][31 - k
])) / 32.0;
274 x
[k
] = (QMF_RE(X
[l
][k
]) + QMF_RE(X
[l
][31 - k
])) / 32.0;
279 DCT2_16_unscaled(x
, x
);
283 for (n
= 8; n
< 24; n
++)
285 qmfs
->v
[qmfs
->v_index
+ n
*2] = qmfs
->v
[qmfs
->v_index
+ 640 + n
*2] = x
[n
-8];
286 qmfs
->v
[qmfs
->v_index
+ n
*2+1] = qmfs
->v
[qmfs
->v_index
+ 640 + n
*2+1] = y
[n
-8];
288 for (n
= 0; n
< 16; n
++)
290 qmfs
->v
[qmfs
->v_index
+ n
] = qmfs
->v
[qmfs
->v_index
+ 640 + n
] = qmfs
->v
[qmfs
->v_index
+ 32-n
];
292 qmfs
->v
[qmfs
->v_index
+ 48] = qmfs
->v
[qmfs
->v_index
+ 640 + 48] = 0;
293 for (n
= 1; n
< 16; n
++)
295 qmfs
->v
[qmfs
->v_index
+ 48+n
] = qmfs
->v
[qmfs
->v_index
+ 640 + 48+n
] = -qmfs
->v
[qmfs
->v_index
+ 48-n
];
298 /* calculate 32 output samples and window */
299 for (k
= 0; k
< 32; k
++)
301 output
[out
++] = MUL_F(qmfs
->v
[qmfs
->v_index
+ k
], qmf_c
[ 2*k
*10]) +
302 MUL_F(qmfs
->v
[qmfs
->v_index
+ 96 + k
], qmf_c
[1 + 2*k
*10]) +
303 MUL_F(qmfs
->v
[qmfs
->v_index
+ 128 + k
], qmf_c
[2 + 2*k
*10]) +
304 MUL_F(qmfs
->v
[qmfs
->v_index
+ 224 + k
], qmf_c
[3 + 2*k
*10]) +
305 MUL_F(qmfs
->v
[qmfs
->v_index
+ 256 + k
], qmf_c
[4 + 2*k
*10]) +
306 MUL_F(qmfs
->v
[qmfs
->v_index
+ 352 + k
], qmf_c
[5 + 2*k
*10]) +
307 MUL_F(qmfs
->v
[qmfs
->v_index
+ 384 + k
], qmf_c
[6 + 2*k
*10]) +
308 MUL_F(qmfs
->v
[qmfs
->v_index
+ 480 + k
], qmf_c
[7 + 2*k
*10]) +
309 MUL_F(qmfs
->v
[qmfs
->v_index
+ 512 + k
], qmf_c
[8 + 2*k
*10]) +
310 MUL_F(qmfs
->v
[qmfs
->v_index
+ 608 + k
], qmf_c
[9 + 2*k
*10]);
313 /* update the ringbuffer index */
315 if (qmfs
->v_index
< 0)
316 qmfs
->v_index
= (640-64);
320 void sbr_qmf_synthesis_64(sbr_info
*sbr
, qmfs_info
*qmfs
, qmf_t X
[MAX_NTSRHFG
][64],
325 int16_t n
, k
, out
= 0;
329 /* qmf subsample l */
330 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
333 /* we are not shifting v, it is a double ringbuffer */
334 //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));
336 /* calculate 128 samples */
337 for (k
= 0; k
< 32; k
++)
340 y
[k
] = (QMF_RE(X
[l
][k
]) - QMF_RE(X
[l
][63 - k
]));
341 x
[k
] = (QMF_RE(X
[l
][k
]) + QMF_RE(X
[l
][63 - k
]));
343 y
[k
] = (QMF_RE(X
[l
][k
]) - QMF_RE(X
[l
][63 - k
])) / 32.0;
344 x
[k
] = (QMF_RE(X
[l
][k
]) + QMF_RE(X
[l
][63 - k
])) / 32.0;
349 DCT2_32_unscaled(x
, x
);
353 for (n
= 16; n
< 48; n
++)
355 qmfs
->v
[qmfs
->v_index
+ n
*2] = qmfs
->v
[qmfs
->v_index
+ 1280 + n
*2] = x
[n
-16];
356 qmfs
->v
[qmfs
->v_index
+ n
*2+1] = qmfs
->v
[qmfs
->v_index
+ 1280 + n
*2+1] = y
[n
-16];
358 for (n
= 0; n
< 32; n
++)
360 qmfs
->v
[qmfs
->v_index
+ n
] = qmfs
->v
[qmfs
->v_index
+ 1280 + n
] = qmfs
->v
[qmfs
->v_index
+ 64-n
];
362 qmfs
->v
[qmfs
->v_index
+ 96] = qmfs
->v
[qmfs
->v_index
+ 1280 + 96] = 0;
363 for (n
= 1; n
< 32; n
++)
365 qmfs
->v
[qmfs
->v_index
+ 96+n
] = qmfs
->v
[qmfs
->v_index
+ 1280 + 96+n
] = -qmfs
->v
[qmfs
->v_index
+ 96-n
];
368 /* calculate 64 output samples and window */
369 for (k
= 0; k
< 64; k
++)
371 output
[out
++] = MUL_F(qmfs
->v
[qmfs
->v_index
+ k
], qmf_c
[ k
*10]) +
372 MUL_F(qmfs
->v
[qmfs
->v_index
+ 192 + k
], qmf_c
[1 + k
*10]) +
373 MUL_F(qmfs
->v
[qmfs
->v_index
+ 256 + k
], qmf_c
[2 + k
*10]) +
374 MUL_F(qmfs
->v
[qmfs
->v_index
+ 256 + 192 + k
], qmf_c
[3 + k
*10]) +
375 MUL_F(qmfs
->v
[qmfs
->v_index
+ 512 + k
], qmf_c
[4 + k
*10]) +
376 MUL_F(qmfs
->v
[qmfs
->v_index
+ 512 + 192 + k
], qmf_c
[5 + k
*10]) +
377 MUL_F(qmfs
->v
[qmfs
->v_index
+ 768 + k
], qmf_c
[6 + k
*10]) +
378 MUL_F(qmfs
->v
[qmfs
->v_index
+ 768 + 192 + k
], qmf_c
[7 + k
*10]) +
379 MUL_F(qmfs
->v
[qmfs
->v_index
+ 1024 + k
], qmf_c
[8 + k
*10]) +
380 MUL_F(qmfs
->v
[qmfs
->v_index
+ 1024 + 192 + k
], qmf_c
[9 + k
*10]);
383 /* update the ringbuffer index */
384 qmfs
->v_index
-= 128;
385 if (qmfs
->v_index
< 0)
386 qmfs
->v_index
= (1280-128);
389 #else /* #ifdef SBR_LOW_POWER */
/* Complex pre-twiddle of subband sample X[l][k] by qmf32_pre_twiddle[k]:
 * SUB yields the real part (re*re - im*im), ADD the imaginary part
 * (im*re + re*im) of the complex product.  Both expand `X` and `l` from the
 * enclosing scope, so they are only usable inside the HQ synthesis loop. */
#define FAAD_CMPLX_PRETWIDDLE_SUB(k) \
    (MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - \
     MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k])))

#define FAAD_CMPLX_PRETWIDDLE_ADD(k) \
    (MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + \
     MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k])))
399 void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
405 real_t scale
= 1.f
/64.f
;
407 int32_t n
, k
, idx0
, idx1
, out
= 0;
410 /* qmf subsample l */
411 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
414 /* buffer is not shifted, we are using a ringbuffer */
415 //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));
417 /* calculate 64 samples */
418 /* complex pre-twiddle */
421 x1
[k
] = FAAD_CMPLX_PRETWIDDLE_SUB(k
); x2
[k
] = FAAD_CMPLX_PRETWIDDLE_ADD(k
); k
++;
422 x1
[k
] = FAAD_CMPLX_PRETWIDDLE_SUB(k
); x2
[k
] = FAAD_CMPLX_PRETWIDDLE_ADD(k
); k
++;
423 x1
[k
] = FAAD_CMPLX_PRETWIDDLE_SUB(k
); x2
[k
] = FAAD_CMPLX_PRETWIDDLE_ADD(k
); k
++;
424 x1
[k
] = FAAD_CMPLX_PRETWIDDLE_SUB(k
); x2
[k
] = FAAD_CMPLX_PRETWIDDLE_ADD(k
); k
++;
431 idx0
= qmfs
->v_index
;
432 idx1
= qmfs
->v_index
+ 63;
433 for (n
= 0; n
< 32; n
+=2)
435 qmfs
->v
[idx0
] = qmfs
->v
[idx0
+ 640] = -x1
[n
] + x2
[n
]; idx0
++;
436 qmfs
->v
[idx1
] = qmfs
->v
[idx1
+ 640] = x1
[n
] + x2
[n
]; idx1
--;
437 qmfs
->v
[idx0
] = qmfs
->v
[idx0
+ 640] = -x1
[n
+1] + x2
[n
+1]; idx0
++;
438 qmfs
->v
[idx1
] = qmfs
->v
[idx1
+ 640] = x1
[n
+1] + x2
[n
+1]; idx1
--;
441 /* calculate 32 output samples and window */
442 for (k
= 0; k
< 32; k
++)
444 idx0
= qmfs
->v_index
+ k
; idx1
= 2*k
*10;
445 output
[out
++] = FAAD_SYNTHESIS_SCALE(
446 MUL_F(qmfs
->v
[idx0
], qmf_c
[idx1
]) +
447 MUL_F(qmfs
->v
[idx0
+ 96], qmf_c
[idx1
+1]) +
448 MUL_F(qmfs
->v
[idx0
+ 128], qmf_c
[idx1
+2]) +
449 MUL_F(qmfs
->v
[idx0
+ 224], qmf_c
[idx1
+3]) +
450 MUL_F(qmfs
->v
[idx0
+ 256], qmf_c
[idx1
+4]) +
451 MUL_F(qmfs
->v
[idx0
+ 352], qmf_c
[idx1
+5]) +
452 MUL_F(qmfs
->v
[idx0
+ 384], qmf_c
[idx1
+6]) +
453 MUL_F(qmfs
->v
[idx0
+ 480], qmf_c
[idx1
+7]) +
454 MUL_F(qmfs
->v
[idx0
+ 512], qmf_c
[idx1
+8]) +
455 MUL_F(qmfs
->v
[idx0
+ 608], qmf_c
[idx1
+9]));
458 /* update ringbuffer index */
460 if (qmfs
->v_index
< 0)
461 qmfs
->v_index
= (640 - 64);
465 void sbr_qmf_synthesis_64(sbr_info
*sbr
, qmfs_info
*qmfs
, qmf_t X
[MAX_NTSRHFG
][64],
468 ALIGN real_t real1
[32];
469 ALIGN real_t imag1
[32];
470 ALIGN real_t real2
[32];
471 ALIGN real_t imag2
[32];
473 real_t
*p_buf_1
, *p_buf_3
;
475 real_t scale
= 1.f
/64.f
;
477 int32_t n
, k
, idx0
, idx1
, out
= 0;
480 /* qmf subsample l */
481 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
484 /* buffer is not shifted, we use double ringbuffer */
485 //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));
487 /* calculate 128 samples */
489 for (k
= 0; k
< 32; k
++)
491 idx0
= 2*k
; idx1
= idx0
+1;
492 real1
[ k
] = QMF_RE(pX
[idx0
]); imag2
[ k
] = QMF_IM(pX
[idx0
]);
493 imag1
[31-k
] = QMF_RE(pX
[idx1
]); real2
[31-k
] = QMF_IM(pX
[idx1
]);
496 // dct4_kernel is DCT_IV without reordering which is done before and after FFT
497 dct4_kernel(real1
, imag1
);
498 dct4_kernel(real2
, imag2
);
500 p_buf_1
= qmfs
->v
+ qmfs
->v_index
;
501 p_buf_3
= p_buf_1
+ 1280;
503 idx0
= 0; idx1
= 127;
504 for (n
= 0; n
< 32; n
++)
506 p_buf_1
[idx0
] = p_buf_3
[idx0
] = real2
[ n
] - real1
[ n
]; idx0
++;
507 p_buf_1
[idx1
] = p_buf_3
[idx1
] = real2
[ n
] + real1
[ n
]; idx1
--;
508 p_buf_1
[idx0
] = p_buf_3
[idx0
] = imag2
[31-n
] + imag1
[31-n
]; idx0
++;
509 p_buf_1
[idx1
] = p_buf_3
[idx1
] = imag2
[31-n
] - imag1
[31-n
]; idx1
--;
512 p_buf_1
= qmfs
->v
+ qmfs
->v_index
;
514 /* calculate 64 output samples and window */
516 for (k
= 0; k
< 64; k
++)
519 output
[out
++] = FAAD_SYNTHESIS_SCALE(
520 MUL_F(p_buf_1
[k
], qmf_c
[idx0
]) +
521 MUL_F(p_buf_1
[k
+ 192 ], qmf_c
[idx0
+1]) +
522 MUL_F(p_buf_1
[k
+ 256 ], qmf_c
[idx0
+2]) +
523 MUL_F(p_buf_1
[k
+ 256+192], qmf_c
[idx0
+3]) +
524 MUL_F(p_buf_1
[k
+ 512 ], qmf_c
[idx0
+4]) +
525 MUL_F(p_buf_1
[k
+ 512+192], qmf_c
[idx0
+5]) +
526 MUL_F(p_buf_1
[k
+ 768 ], qmf_c
[idx0
+6]) +
527 MUL_F(p_buf_1
[k
+ 768+192], qmf_c
[idx0
+7]) +
528 MUL_F(p_buf_1
[k
+1024 ], qmf_c
[idx0
+8]) +
529 MUL_F(p_buf_1
[k
+1024+192], qmf_c
[idx0
+9]));
532 /* update ringbuffer index */
533 qmfs
->v_index
-= 128;
534 if (qmfs
->v_index
< 0)
535 qmfs
->v_index
= (1280 - 128);
538 #endif /* #ifdef SBR_LOW_POWER */
540 #endif /* #ifdef SBR_DEC */