/*
** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
**
** Any non-GPL usage of this software or parts of this software is strictly
** forbidden.
**
** Commercial non-GPL licensing of this software is possible.
** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
**
** $Id: sbr_qmf.c,v 1.27 2004/09/04 14:56:28 menno Exp $
**/

#include "common.h"
#include "structs.h"

#include <stdlib.h>
#include <string.h>

#include "sbr_dct.h"
#include "sbr_qmf.h"
#include "sbr_qmf_c.h"
#include "sbr_syntax.h"

qmfa_info *qmfa_init(uint8_t channels)
{
    qmfa_info *qmfa = (qmfa_info*)faad_malloc(sizeof(qmfa_info));

    /* x is implemented as a double ringbuffer */
    qmfa->x = (real_t*)faad_malloc(2 * channels * 10 * sizeof(real_t));
    memset(qmfa->x, 0, 2 * channels * 10 * sizeof(real_t));

    /* ringbuffer index */
    qmfa->x_index = 0;

    qmfa->channels = channels;

    return qmfa;
}
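
/*
 * A note on the "double ringbuffer": x holds 10 blocks of `channels`
 * samples, stored twice back to back (hence 2 * channels * 10 entries;
 * 2 * 320 reals for the 32-band analysis).  sbr_qmf_analysis_32() writes
 * every sample at both x_index + n and x_index + n + 320, so a full
 * 320-sample window starting at any x_index is always contiguous and the
 * windowing loop needs no wrap-around tests.
 */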

void qmfa_end(qmfa_info *qmfa)
{
    if (qmfa)
    {
        if (qmfa->x) faad_free(qmfa->x);
        faad_free(qmfa);
    }
}

void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input,
                         qmf_t X[MAX_NTSRHFG][64], uint8_t offset, uint8_t kx)
{
    ALIGN real_t u[64];
#ifndef SBR_LOW_POWER
    ALIGN real_t in_real[32], in_imag[32], out_real[32], out_imag[32];
#else
    ALIGN real_t y[32];
#endif
    uint32_t in = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        int16_t n;

        /* shift input buffer x */
        /* input buffer is not shifted anymore, x is implemented as double ringbuffer */
        //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));

        /* add new samples to input buffer x */
        for (n = 32 - 1; n >= 0; n--)
        {
#ifdef FIXED_POINT
            qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = (input[in++]) >> 4;
#else
            qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = input[in++];
#endif
        }
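
        /*
         * The FIXED_POINT path shifts each new sample down by 4 bits,
         * presumably to gain headroom for the five-tap accumulation into
         * u[] below (an inference; the original source does not say).
         */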

        /* window and summation to create array u */
        for (n = 0; n < 64; n++)
        {
            u[n] = MUL_F(qmfa->x[qmfa->x_index + n], qmf_c[2*n]) +
                MUL_F(qmfa->x[qmfa->x_index + n + 64], qmf_c[2*(n + 64)]) +
                MUL_F(qmfa->x[qmfa->x_index + n + 128], qmf_c[2*(n + 128)]) +
                MUL_F(qmfa->x[qmfa->x_index + n + 192], qmf_c[2*(n + 192)]) +
                MUL_F(qmfa->x[qmfa->x_index + n + 256], qmf_c[2*(n + 256)]);
        }

        /* update ringbuffer index */
        qmfa->x_index -= 32;
        if (qmfa->x_index < 0)
            qmfa->x_index = (320 - 32);
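
        /*
         * x_index walks backwards through the buffer in steps of 32, so
         * each slot's 32 new samples land at the start of the 320-sample
         * window read above while the oldest block drops off the end.
         */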

        /* calculate 32 subband samples by introducing X */
#ifdef SBR_LOW_POWER
        y[0] = u[48];
        for (n = 1; n < 16; n++)
            y[n] = u[n+48] + u[48-n];
        for (n = 16; n < 32; n++)
            y[n] = -u[n-16] + u[48-n];

        DCT3_32_unscaled(u, y);

        for (n = 0; n < 32; n++)
        {
            if (n < kx)
            {
#ifdef FIXED_POINT
                QMF_RE(X[l + offset][n]) = u[n] /*<< 1*/;
#else
                QMF_RE(X[l + offset][n]) = 2. * u[n];
#endif
            } else {
                QMF_RE(X[l + offset][n]) = 0;
            }
        }
#else
        // Reordering of data moved from DCT_IV to here
        in_imag[31] = u[1];
        in_real[0] = u[0];
        for (n = 1; n < 31; n++)
        {
            in_imag[31 - n] = u[n+1];
            in_real[n] = -u[64-n];
        }
        in_imag[0] = u[32];
        in_real[31] = -u[33];

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(in_real, in_imag, out_real, out_imag);
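
        /*
         * The packing above and the unpacking below are the DCT-IV's own
         * input/output permutations, hoisted out of dct4_kernel (which, per
         * the comment above, is a DCT_IV with the reordering around its FFT
         * stripped).  That lets the same kernel serve this analysis and the
         * 64-band synthesis further down.
         */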

        // Reordering of data moved from DCT_IV to here
        for (n = 0; n < 16; n++)
        {
            if (2*n+1 < kx)
            {
#ifdef FIXED_POINT
                QMF_RE(X[l + offset][2*n])   = out_real[n];
                QMF_IM(X[l + offset][2*n])   = out_imag[n];
                QMF_RE(X[l + offset][2*n+1]) = -out_imag[31-n];
                QMF_IM(X[l + offset][2*n+1]) = -out_real[31-n];
#else
                QMF_RE(X[l + offset][2*n])   = 2. * out_real[n];
                QMF_IM(X[l + offset][2*n])   = 2. * out_imag[n];
                QMF_RE(X[l + offset][2*n+1]) = -2. * out_imag[31-n];
                QMF_IM(X[l + offset][2*n+1]) = -2. * out_real[31-n];
#endif
            } else {
                if (2*n < kx)
                {
#ifdef FIXED_POINT
                    QMF_RE(X[l + offset][2*n]) = out_real[n];
                    QMF_IM(X[l + offset][2*n]) = out_imag[n];
#else
                    QMF_RE(X[l + offset][2*n]) = 2. * out_real[n];
                    QMF_IM(X[l + offset][2*n]) = 2. * out_imag[n];
#endif
                } else {
                    QMF_RE(X[l + offset][2*n]) = 0;
                    QMF_IM(X[l + offset][2*n]) = 0;
                }

                QMF_RE(X[l + offset][2*n+1]) = 0;
                QMF_IM(X[l + offset][2*n+1]) = 0;
            }
        }
#endif
    }
}

static const complex_t qmf32_pre_twiddle[] =
{
    { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
    { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
    { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
    { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
    { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
    { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
    { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
    { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
    { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
    { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
    { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
    { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
    { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
    { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
    { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
    { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
    { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
    { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
    { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
    { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
    { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
    { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
    { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
    { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
    { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
    { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
    { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
    { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
    { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
    { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
    { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
    { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
};
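
/*
 * Inferred, not stated in the original source: the pairs match
 * { cos(a), -sin(a) } with a = pi*(2k+1)/256 for k = 0..31, i.e.
 * qmf32_pre_twiddle[k] = e^{-j*pi*(2k+1)/256}, the phase offset applied
 * before the DCT-IV/DST-IV pair in the 32-band synthesis below.
 */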

qmfs_info *qmfs_init(uint8_t channels)
{
    qmfs_info *qmfs = (qmfs_info*)faad_malloc(sizeof(qmfs_info));

    /* v is a double ringbuffer */
    qmfs->v = (real_t*)faad_malloc(2 * channels * 20 * sizeof(real_t));
    memset(qmfs->v, 0, 2 * channels * 20 * sizeof(real_t));

    /* ringbuffer index */
    qmfs->v_index = 0;

    qmfs->channels = channels;

    return qmfs;
}
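
/*
 * Same double-ringbuffer trick as qmfa_init(): v holds 20 blocks of
 * `channels` samples stored twice (2 * 1280 reals for 64 channels), so a
 * full 20-block window starting at any v_index is contiguous in memory.
 */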

void qmfs_end(qmfs_info *qmfs)
{
    if (qmfs)
    {
        if (qmfs->v) faad_free(qmfs->v);
        faad_free(qmfs);
    }
}

#ifdef SBR_LOW_POWER

void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[16];
    ALIGN real_t y[16];
    int16_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffer v */
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        for (k = 0; k < 16; k++)
        {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_16_unscaled(x, x);
        /* odd n samples */
        DCT4_16(y, y);
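
        /*
         * Low-power SBR keeps only the real part of X.  The sums in x[]
         * pass through a 16-point DCT-II and fill the even-indexed v
         * entries below; the differences in y[] pass through a 16-point
         * DCT-IV and fill the odd-indexed ones, standing in for a single
         * 32-point modulation at lower cost.
         */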

        for (n = 8; n < 24; n++)
        {
            qmfs->v[qmfs->v_index + n*2]   = qmfs->v[qmfs->v_index + 640 + n*2]   = x[n-8];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 640 + n*2+1] = y[n-8];
        }
        for (n = 0; n < 16; n++)
        {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = qmfs->v[qmfs->v_index + 32-n];
        }
        qmfs->v[qmfs->v_index + 48] = qmfs->v[qmfs->v_index + 640 + 48] = 0;
        for (n = 1; n < 16; n++)
        {
            qmfs->v[qmfs->v_index + 48+n] = qmfs->v[qmfs->v_index + 640 + 48+n] = -qmfs->v[qmfs->v_index + 48-n];
        }

        /* calculate 32 output samples and window */
        for (k = 0; k < 32; k++)
        {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2*k]);
        }
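
        /*
         * The ten taps read v at offsets 128*j + k and 128*j + 96 + k
         * (j = 0..4); the middle 64 entries of each 128-sample block are
         * skipped, which corresponds to the g[]/w[] extraction in the
         * ISO/IEC 14496-3 synthesis description.  qmf_c is again read at
         * stride 2 for the 32-band case.
         */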

        /* update the ringbuffer index */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0)
            qmfs->v_index = (640 - 64);
    }
}

void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[32];
    ALIGN real_t y[32];
    int16_t n, k, out = 0;
    uint8_t l;
328 for (l
= 0; l
< sbr
->numTimeSlotsRate
; l
++)
331 /* we are not shifting v, it is a double ringbuffer */
332 //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples */
        for (k = 0; k < 32; k++)
        {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_32_unscaled(x, x);
        /* odd n samples */
        DCT4_32(y, y);

        for (n = 16; n < 48; n++)
        {
            qmfs->v[qmfs->v_index + n*2]   = qmfs->v[qmfs->v_index + 1280 + n*2]   = x[n-16];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 1280 + n*2+1] = y[n-16];
        }
        for (n = 0; n < 32; n++)
        {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 1280 + n] = qmfs->v[qmfs->v_index + 64-n];
        }
        qmfs->v[qmfs->v_index + 96] = qmfs->v[qmfs->v_index + 1280 + 96] = 0;
        for (n = 1; n < 32; n++)
        {
            qmfs->v[qmfs->v_index + 96+n] = qmfs->v[qmfs->v_index + 1280 + 96+n] = -qmfs->v[qmfs->v_index + 96-n];
        }

        /* calculate 64 output samples and window */
        for (k = 0; k < 64; k++)
        {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[k]) +
                MUL_F(qmfs->v[qmfs->v_index + 192 + k], qmf_c[64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[128 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + 192 + k], qmf_c[128 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[256 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + 192 + k], qmf_c[256 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + k], qmf_c[384 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + 192 + k], qmf_c[384 + 64 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + k], qmf_c[512 + k]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + 192 + k], qmf_c[512 + 64 + k]);
        }

        /* update the ringbuffer index */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280 - 128);
    }
}

#else /* #ifdef SBR_LOW_POWER */

void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x1[32], x2[32];
#ifndef FIXED_POINT
    real_t scale = 1.f/64.f;
#endif
    int16_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffer v */
        /* buffer is not shifted, we are using a ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        /* complex pre-twiddle */
        for (k = 0; k < 32; k++)
        {
            x1[k] = MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k]));
            x2[k] = MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k]));

#ifndef FIXED_POINT
            x1[k] *= scale;
            x2[k] *= scale;
#else
            x1[k] >>= 1;
            x2[k] >>= 1;
#endif
        }

        /* transform */
        DCT4_32(x1, x1);
        DST4_32(x2, x2);
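
        /*
         * Rotating X[l][k] by the pre-twiddle (apparently e^{-j*pi*(2k+1)/256},
         * see the table above) splits the modulation so the rotated real
         * part can be finished with a DCT-IV and the rotated imaginary part
         * with a DST-IV, avoiding a direct complex 64-point transform.
         */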

        for (n = 0; n < 32; n++)
        {
            qmfs->v[qmfs->v_index + n]      = qmfs->v[qmfs->v_index + 640 + n]      = -x1[n] + x2[n];
            qmfs->v[qmfs->v_index + 63 - n] = qmfs->v[qmfs->v_index + 640 + 63 - n] = x1[n] + x2[n];
        }

        /* calculate 32 output samples and window */
        for (k = 0; k < 32; k++)
        {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2*k]) +
                MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2*k]);
        }

        /* update ringbuffer index */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0)
            qmfs->v_index = (640 - 64);
    }
}

void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
//    ALIGN real_t x1[64], x2[64];
#ifndef SBR_LOW_POWER
    ALIGN real_t in_real1[32], in_imag1[32], out_real1[32], out_imag1[32];
    ALIGN real_t in_real2[32], in_imag2[32], out_real2[32], out_imag2[32];
#endif
    qmf_t *pX;
    real_t *pring_buffer_1, *pring_buffer_3;
//    real_t *ptemp_1, *ptemp_2;
#ifdef PREFER_POINTERS
    // These pointers are used if the target platform has auto-increment address generators
    real_t *pring_buffer_2, *pring_buffer_4;
    real_t *pring_buffer_5, *pring_buffer_6;
    real_t *pring_buffer_7, *pring_buffer_8;
    real_t *pring_buffer_9, *pring_buffer_10;
    const real_t *pqmf_c_1, *pqmf_c_2, *pqmf_c_3, *pqmf_c_4;
    const real_t *pqmf_c_5, *pqmf_c_6, *pqmf_c_7, *pqmf_c_8;
    const real_t *pqmf_c_9, *pqmf_c_10;
#endif // #ifdef PREFER_POINTERS
#ifndef FIXED_POINT
    real_t scale = 1.f/64.f;
#endif
    int16_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
    {
        /* shift buffer v */
        /* buffer is not shifted, we use double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples */
        pX = X[l];

#ifndef FIXED_POINT
        in_imag1[31] = scale*QMF_RE(pX[1]);
        in_real1[0]  = scale*QMF_RE(pX[0]);
        in_imag2[31] = scale*QMF_IM(pX[63-1]);
        in_real2[0]  = scale*QMF_IM(pX[63-0]);
        for (k = 1; k < 31; k++)
        {
            in_imag1[31 - k] = scale*QMF_RE(pX[2*k + 1]);
            in_real1[     k] = scale*QMF_RE(pX[2*k    ]);
            in_imag2[31 - k] = scale*QMF_IM(pX[63 - (2*k + 1)]);
            in_real2[     k] = scale*QMF_IM(pX[63 - (2*k    )]);
        }
        in_imag1[0]  = scale*QMF_RE(pX[63]);
        in_real1[31] = scale*QMF_RE(pX[62]);
        in_imag2[0]  = scale*QMF_IM(pX[63-63]);
        in_real2[31] = scale*QMF_IM(pX[63-62]);
#else
        in_imag1[31] = QMF_RE(pX[1]) >> 1;
        in_real1[0]  = QMF_RE(pX[0]) >> 1;
        in_imag2[31] = QMF_IM(pX[62]) >> 1;
        in_real2[0]  = QMF_IM(pX[63]) >> 1;
        for (k = 1; k < 31; k++)
        {
            in_imag1[31 - k] = QMF_RE(pX[2*k + 1]) >> 1;
            in_real1[     k] = QMF_RE(pX[2*k    ]) >> 1;
            in_imag2[31 - k] = QMF_IM(pX[63 - (2*k + 1)]) >> 1;
            in_real2[     k] = QMF_IM(pX[63 - (2*k    )]) >> 1;
        }
        in_imag1[0]  = QMF_RE(pX[63]) >> 1;
        in_real1[31] = QMF_RE(pX[62]) >> 1;
        in_imag2[0]  = QMF_IM(pX[0]) >> 1;
        in_real2[31] = QMF_IM(pX[1]) >> 1;
#endif

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(in_real1, in_imag1, out_real1, out_imag1);
        dct4_kernel(in_real2, in_imag2, out_real2, out_imag2);
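
        /*
         * Two 32-point kernels stand in for one 64-point complex
         * modulation: the first transforms the reordered, scaled real
         * parts of X[l], the second the reversed imaginary parts; the
         * loop below recombines their outputs into 128 new v samples.
         */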

        pring_buffer_1 = qmfs->v + qmfs->v_index;
        pring_buffer_3 = pring_buffer_1 + 1280;
#ifdef PREFER_POINTERS
        pring_buffer_2 = pring_buffer_1 + 127;
        pring_buffer_4 = pring_buffer_1 + (1280 + 127);
#endif // #ifdef PREFER_POINTERS

#ifdef PREFER_POINTERS
        for (n = 0; n < 32; n++)
        {
            //real_t x1 = *ptemp_1++;
            //real_t x2 = *ptemp_2++;
            // pring_buffer_3 and pring_buffer_4 are needed only for double ring buffer
            *pring_buffer_1++ = *pring_buffer_3++ = out_real2[n] - out_real1[n];
            *pring_buffer_2-- = *pring_buffer_4-- = out_real2[n] + out_real1[n];
            *pring_buffer_1++ = *pring_buffer_3++ = out_imag2[31-n] + out_imag1[31-n];
            *pring_buffer_2-- = *pring_buffer_4-- = out_imag2[31-n] - out_imag1[31-n];
        }
#else // #ifdef PREFER_POINTERS
        for (n = 0; n < 32; n++)
        {
            // pring_buffer_3 and pring_buffer_4 are needed only for double ring buffer
            pring_buffer_1[2*n]         = pring_buffer_3[2*n]         = out_real2[n]    - out_real1[n];
            pring_buffer_1[127-2*n]     = pring_buffer_3[127-2*n]     = out_real2[n]    + out_real1[n];
            pring_buffer_1[2*n+1]       = pring_buffer_3[2*n+1]       = out_imag2[31-n] + out_imag1[31-n];
            pring_buffer_1[127-(2*n+1)] = pring_buffer_3[127-(2*n+1)] = out_imag2[31-n] - out_imag1[31-n];
        }
#endif // #ifdef PREFER_POINTERS

        pring_buffer_1 = qmfs->v + qmfs->v_index;
#ifdef PREFER_POINTERS
        pring_buffer_2 = pring_buffer_1 + 192;
        pring_buffer_3 = pring_buffer_1 + 256;
        pring_buffer_4 = pring_buffer_1 + (256 + 192);
        pring_buffer_5 = pring_buffer_1 + 512;
        pring_buffer_6 = pring_buffer_1 + (512 + 192);
        pring_buffer_7 = pring_buffer_1 + 768;
        pring_buffer_8 = pring_buffer_1 + (768 + 192);
        pring_buffer_9 = pring_buffer_1 + 1024;
        pring_buffer_10 = pring_buffer_1 + (1024 + 192);

        pqmf_c_1 = qmf_c;
        pqmf_c_2 = qmf_c + 64;
        pqmf_c_3 = qmf_c + 128;
        pqmf_c_4 = qmf_c + 192;
        pqmf_c_5 = qmf_c + 256;
        pqmf_c_6 = qmf_c + 320;
        pqmf_c_7 = qmf_c + 384;
        pqmf_c_8 = qmf_c + 448;
        pqmf_c_9 = qmf_c + 512;
        pqmf_c_10 = qmf_c + 576;
#endif // #ifdef PREFER_POINTERS

        /* calculate 64 output samples and window */
        for (k = 0; k < 64; k++)
        {
#ifdef PREFER_POINTERS
            output[out++] =
                MUL_F(*pring_buffer_1++, *pqmf_c_1++) +
                MUL_F(*pring_buffer_2++, *pqmf_c_2++) +
                MUL_F(*pring_buffer_3++, *pqmf_c_3++) +
                MUL_F(*pring_buffer_4++, *pqmf_c_4++) +
                MUL_F(*pring_buffer_5++, *pqmf_c_5++) +
                MUL_F(*pring_buffer_6++, *pqmf_c_6++) +
                MUL_F(*pring_buffer_7++, *pqmf_c_7++) +
                MUL_F(*pring_buffer_8++, *pqmf_c_8++) +
                MUL_F(*pring_buffer_9++, *pqmf_c_9++) +
                MUL_F(*pring_buffer_10++, *pqmf_c_10++);
#else // #ifdef PREFER_POINTERS
            output[out++] =
                MUL_F(pring_buffer_1[k+0], qmf_c[k+0]) +
                MUL_F(pring_buffer_1[k+192], qmf_c[k+64]) +
                MUL_F(pring_buffer_1[k+256], qmf_c[k+128]) +
                MUL_F(pring_buffer_1[k+(256+192)], qmf_c[k+192]) +
                MUL_F(pring_buffer_1[k+512], qmf_c[k+256]) +
                MUL_F(pring_buffer_1[k+(512+192)], qmf_c[k+320]) +
                MUL_F(pring_buffer_1[k+768], qmf_c[k+384]) +
                MUL_F(pring_buffer_1[k+(768+192)], qmf_c[k+448]) +
                MUL_F(pring_buffer_1[k+1024], qmf_c[k+512]) +
                MUL_F(pring_buffer_1[k+(1024+192)], qmf_c[k+576]);
#endif // #ifdef PREFER_POINTERS
        }

        /* update ringbuffer index */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280 - 128);
    }
}

#endif /* #ifdef SBR_LOW_POWER */