/*
** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
**
** Any non-GPL usage of this software or parts of this software is strictly
** forbidden.
**
** Commercial non-GPL licensing of this software is possible.
** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
**
** $Id: sbr_hfgen.c,v 1.22 2004/09/08 09:43:11 gcp Exp $
**/

/* High Frequency generation */

#include "common.h"
#include "structs.h"

#ifdef SBR_DEC

#include "sbr_syntax.h"
#include "sbr_hfgen.h"
#include "sbr_fbt.h"

/* static function declarations */
#ifdef SBR_LOW_POWER
static void calc_prediction_coef_lp(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                                    complex_t *alpha_0, complex_t *alpha_1, real_t *rxx);
static void calc_aliasing_degree(sbr_info *sbr, real_t *rxx, real_t *deg);
#else
static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                                 complex_t *alpha_0, complex_t *alpha_1, uint8_t k);
#endif
static void calc_chirp_factors(sbr_info *sbr, uint8_t ch);
static void patch_construction(sbr_info *sbr);
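
/*
 * Overview: hf_generation() builds the high-band QMF samples Xhigh by copying
 * low-band subbands patch by patch.  When the chirp factor bw of a band is
 * non-zero, the copied subband is additionally run through a second order
 * prediction (inverse) filter:
 *
 *   Xhigh[l][k] = Xlow[l][p]
 *               + bw   * alpha_0[p] * Xlow[l-1][p]
 *               + bw^2 * alpha_1[p] * Xlow[l-2][p]
 *
 * where p is the source subband, k the target subband and alpha_0/alpha_1 the
 * prediction coefficients computed by calc_prediction_coef*() below.
 */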

void hf_generation(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                   qmf_t Xhigh[MAX_NTSRHFG][64]
#ifdef SBR_LOW_POWER
                   ,real_t *deg
#endif
                   ,uint8_t ch)
{
    uint8_t l, i, x;
    ALIGN complex_t alpha_0[64], alpha_1[64];
#ifdef SBR_LOW_POWER
    ALIGN real_t rxx[64];
#endif

    uint8_t offset = sbr->tHFAdj;
    uint8_t first = sbr->t_E[ch][0];
    uint8_t last = sbr->t_E[ch][sbr->L_E[ch]];

    calc_chirp_factors(sbr, ch);

#ifdef SBR_LOW_POWER
    memset(deg, 0, 64*sizeof(real_t));
#endif

    if ((ch == 0) && (sbr->Reset))
        patch_construction(sbr);

    /* calculate the prediction coefficients */
#ifdef SBR_LOW_POWER
    calc_prediction_coef_lp(sbr, Xlow, alpha_0, alpha_1, rxx);
    calc_aliasing_degree(sbr, rxx, deg);
#endif

    /* actual HF generation */
    for (i = 0; i < sbr->noPatches; i++)
    {
        for (x = 0; x < sbr->patchNoSubbands[i]; x++)
        {
            real_t a0_r, a0_i, a1_r, a1_i;
            real_t bw, bw2;
            uint8_t q, p, k, g;

            /* find the low and high band for patching */
            k = sbr->kx + x;
            for (q = 0; q < i; q++)
            {
                k += sbr->patchNoSubbands[q];
            }
            p = sbr->patchStartSubband[i] + x;

#ifdef SBR_LOW_POWER
            if (x != 0 /*x < sbr->patchNoSubbands[i]-1*/)
                deg[k] = deg[p];
            else
                deg[k] = 0;
#endif

            g = sbr->table_map_k_to_g[k];

            bw = sbr->bwArray[ch][g];
            bw2 = MUL_C(bw, bw);

            /* do the patching */
            /* with or without filtering */
            if (bw2 > 0)
            {
                real_t temp1_r, temp2_r, temp3_r;
#ifndef SBR_LOW_POWER
                real_t temp1_i, temp2_i, temp3_i;
                calc_prediction_coef(sbr, Xlow, alpha_0, alpha_1, p);
#endif

                a0_r = MUL_C(RE(alpha_0[p]), bw);
                a1_r = MUL_C(RE(alpha_1[p]), bw2);
#ifndef SBR_LOW_POWER
                a0_i = MUL_C(IM(alpha_0[p]), bw);
                a1_i = MUL_C(IM(alpha_1[p]), bw2);
#endif

                temp2_r = QMF_RE(Xlow[first - 2 + offset][p]);
                temp3_r = QMF_RE(Xlow[first - 1 + offset][p]);
#ifndef SBR_LOW_POWER
                temp2_i = QMF_IM(Xlow[first - 2 + offset][p]);
                temp3_i = QMF_IM(Xlow[first - 1 + offset][p]);
#endif
                for (l = first; l < last; l++)
                {
                    temp1_r = temp2_r;
                    temp2_r = temp3_r;
                    temp3_r = QMF_RE(Xlow[l + offset][p]);
#ifndef SBR_LOW_POWER
                    temp1_i = temp2_i;
                    temp2_i = temp3_i;
                    temp3_i = QMF_IM(Xlow[l + offset][p]);
#endif

#ifdef SBR_LOW_POWER
                    QMF_RE(Xhigh[l + offset][k]) =
                        temp3_r
                        +(MUL_R(a0_r, temp2_r) +
                        MUL_R(a1_r, temp1_r));
#else
                    QMF_RE(Xhigh[l + offset][k]) =
                        temp3_r
                        +(MUL_R(a0_r, temp2_r) -
                        MUL_R(a0_i, temp2_i) +
                        MUL_R(a1_r, temp1_r) -
                        MUL_R(a1_i, temp1_i));
                    QMF_IM(Xhigh[l + offset][k]) =
                        temp3_i
                        +(MUL_R(a0_i, temp2_r) +
                        MUL_R(a0_r, temp2_i) +
                        MUL_R(a1_i, temp1_r) +
                        MUL_R(a1_r, temp1_i));
#endif
                }
            } else {
                for (l = first; l < last; l++)
                {
                    QMF_RE(Xhigh[l + offset][k]) = QMF_RE(Xlow[l + offset][p]);
#ifndef SBR_LOW_POWER
                    QMF_IM(Xhigh[l + offset][k]) = QMF_IM(Xlow[l + offset][p]);
#endif
                }
            }
        }
    }

    if (sbr->Reset)
    {
        limiter_frequency_table(sbr);
    }
}
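
/*
 * Lagged covariance terms used by the covariance method below.  rXY is the
 * sum over the analysed time slots of x[l-X] * conj(x[l-Y]) for the low-band
 * signal x of one QMF subband (only the real parts are used in the low power
 * decoder); det is the determinant of the resulting 2x2 normal-equation
 * matrix, r11*r22 - rel*|r12|^2.
 */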
typedef struct
{
    complex_t r01;
    complex_t r02;
    complex_t r11;
    complex_t r12;
    complex_t r22;
    real_t det;
} acorr_coef;

#ifdef SBR_LOW_POWER
static void auto_correlation(sbr_info *sbr, acorr_coef *ac,
                             qmf_t buffer[MAX_NTSRHFG][64],
                             uint8_t bd, uint8_t len)
{
    real_t r01 = 0, r02 = 0, r11 = 0;
    int8_t j;
    uint8_t offset = sbr->tHFAdj;
#ifdef FIXED_POINT
    const real_t rel = FRAC_CONST(0.999999); // 1 / (1 + 1e-6f);
    uint32_t mask = 0;
    uint32_t pow2, exp;
#else
    const real_t rel = 1 / (1 + 1e-6f);
#endif
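
    /* Fixed point only: the block below picks a common right shift 'exp' so
     * that the accumulated products have enough headroom.  'mask' ORs
     * together the magnitude bits of every sample in the window
     * (x ^ (x >> 31) folds the sign), wl_min_lzc() turns that into a shift
     * count, and each sample is scaled down with rounding via
     * +(1 << (exp-1)) before it is used. */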
#ifdef FIXED_POINT
    mask = 0;

    for (j = (offset-2); j < (len + offset); j++)
    {
        real_t x;
        x = QMF_RE(buffer[j][bd])>>REAL_BITS;
        mask |= x ^ (x >> 31);
    }

    exp = wl_min_lzc(mask);

    /* improves accuracy */
    if (exp > 0)
        exp -= 1;

    for (j = offset; j < len + offset; j++)
    {
        real_t buf_j = ((QMF_RE(buffer[j][bd])+(1<<(exp-1)))>>exp);
        real_t buf_j_1 = ((QMF_RE(buffer[j-1][bd])+(1<<(exp-1)))>>exp);
        real_t buf_j_2 = ((QMF_RE(buffer[j-2][bd])+(1<<(exp-1)))>>exp);

        /* normalisation with rounding */
        r01 += MUL_R(buf_j, buf_j_1);
        r02 += MUL_R(buf_j, buf_j_2);
        r11 += MUL_R(buf_j_1, buf_j_1);
    }
    RE(ac->r12) = r01 -
        MUL_R(((QMF_RE(buffer[len+offset-1][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp)) +
        MUL_R(((QMF_RE(buffer[offset-1][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp));
    RE(ac->r22) = r11 -
        MUL_R(((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp)) +
        MUL_R(((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp));
#else
    for (j = offset; j < len + offset; j++)
    {
        r01 += QMF_RE(buffer[j][bd]) * QMF_RE(buffer[j-1][bd]);
        r02 += QMF_RE(buffer[j][bd]) * QMF_RE(buffer[j-2][bd]);
        r11 += QMF_RE(buffer[j-1][bd]) * QMF_RE(buffer[j-1][bd]);
    }
    RE(ac->r12) = r01 -
        QMF_RE(buffer[len+offset-1][bd]) * QMF_RE(buffer[len+offset-2][bd]) +
        QMF_RE(buffer[offset-1][bd]) * QMF_RE(buffer[offset-2][bd]);
    RE(ac->r22) = r11 -
        QMF_RE(buffer[len+offset-2][bd]) * QMF_RE(buffer[len+offset-2][bd]) +
        QMF_RE(buffer[offset-2][bd]) * QMF_RE(buffer[offset-2][bd]);
#endif
    RE(ac->r01) = r01;
    RE(ac->r02) = r02;
    RE(ac->r11) = r11;

    ac->det = MUL_R(RE(ac->r11), RE(ac->r22)) - MUL_F(MUL_R(RE(ac->r12), RE(ac->r12)), rel);
}
#else
static void auto_correlation(sbr_info *sbr, acorr_coef *ac, qmf_t buffer[MAX_NTSRHFG][64],
                             uint8_t bd, uint8_t len)
{
    real_t r01r = 0, r01i = 0, r02r = 0, r02i = 0, r11r = 0;
    real_t temp1_r, temp1_i, temp2_r, temp2_i, temp3_r, temp3_i, temp4_r, temp4_i, temp5_r, temp5_i;
#ifdef FIXED_POINT
    const real_t rel = FRAC_CONST(0.999999); // 1 / (1 + 1e-6f);
    uint32_t mask, exp;
    real_t pow2_to_exp;
#else
    const real_t rel = 1 / (1 + 1e-6f);
#endif
    int8_t j;
    uint8_t offset = sbr->tHFAdj;

#ifdef FIXED_POINT
    mask = 0;

    for (j = (offset-2); j < (len + offset); j++)
    {
        real_t x;
        x = QMF_RE(buffer[j][bd])>>REAL_BITS;
        mask |= x ^ (x >> 31);
        x = QMF_IM(buffer[j][bd])>>REAL_BITS;
        mask |= x ^ (x >> 31);
    }

    exp = wl_min_lzc(mask);

    /* improves accuracy */
    if (exp > 0)
        exp -= 1;

    pow2_to_exp = 1<<(exp-1);

    temp2_r = (QMF_RE(buffer[offset-2][bd]) + pow2_to_exp) >> exp;
    temp2_i = (QMF_IM(buffer[offset-2][bd]) + pow2_to_exp) >> exp;
    temp3_r = (QMF_RE(buffer[offset-1][bd]) + pow2_to_exp) >> exp;
    temp3_i = (QMF_IM(buffer[offset-1][bd]) + pow2_to_exp) >> exp;
    // Save these because they are needed after the loop
    temp4_r = temp2_r;
    temp4_i = temp2_i;
    temp5_r = temp3_r;
    temp5_i = temp3_i;

    for (j = offset; j < len + offset; j++)
    {
        temp1_r = temp2_r; // temp1_r = (QMF_RE(buffer[j-2][bd]) + (1<<(exp-1))) >> exp;
        temp1_i = temp2_i; // temp1_i = (QMF_IM(buffer[j-2][bd]) + (1<<(exp-1))) >> exp;
        temp2_r = temp3_r; // temp2_r = (QMF_RE(buffer[j-1][bd]) + (1<<(exp-1))) >> exp;
        temp2_i = temp3_i; // temp2_i = (QMF_IM(buffer[j-1][bd]) + (1<<(exp-1))) >> exp;
        temp3_r = (QMF_RE(buffer[j][bd]) + pow2_to_exp) >> exp;
        temp3_i = (QMF_IM(buffer[j][bd]) + pow2_to_exp) >> exp;
        r01r += MUL_R(temp3_r, temp2_r) + MUL_R(temp3_i, temp2_i);
        r01i += MUL_R(temp3_i, temp2_r) - MUL_R(temp3_r, temp2_i);
        r02r += MUL_R(temp3_r, temp1_r) + MUL_R(temp3_i, temp1_i);
        r02i += MUL_R(temp3_i, temp1_r) - MUL_R(temp3_r, temp1_i);
        r11r += MUL_R(temp2_r, temp2_r) + MUL_R(temp2_i, temp2_i);
    }

    // These are the actual values in the temporary variables at this point
    // temp1_r = (QMF_RE(buffer[len+offset-1-2][bd]) + (1<<(exp-1))) >> exp;
    // temp1_i = (QMF_IM(buffer[len+offset-1-2][bd]) + (1<<(exp-1))) >> exp;
    // temp2_r = (QMF_RE(buffer[len+offset-1-1][bd]) + (1<<(exp-1))) >> exp;
    // temp2_i = (QMF_IM(buffer[len+offset-1-1][bd]) + (1<<(exp-1))) >> exp;
    // temp3_r = (QMF_RE(buffer[len+offset-1][bd]) + (1<<(exp-1))) >> exp;
    // temp3_i = (QMF_IM(buffer[len+offset-1][bd]) + (1<<(exp-1))) >> exp;
    // temp4_r = (QMF_RE(buffer[offset-2][bd]) + (1<<(exp-1))) >> exp;
    // temp4_i = (QMF_IM(buffer[offset-2][bd]) + (1<<(exp-1))) >> exp;
    // temp5_r = (QMF_RE(buffer[offset-1][bd]) + (1<<(exp-1))) >> exp;
    // temp5_i = (QMF_IM(buffer[offset-1][bd]) + (1<<(exp-1))) >> exp;

    RE(ac->r12) = r01r -
        (MUL_R(temp3_r, temp2_r) + MUL_R(temp3_i, temp2_i)) +
        (MUL_R(temp5_r, temp4_r) + MUL_R(temp5_i, temp4_i));
    IM(ac->r12) = r01i -
        (MUL_R(temp3_i, temp2_r) - MUL_R(temp3_r, temp2_i)) +
        (MUL_R(temp5_i, temp4_r) - MUL_R(temp5_r, temp4_i));
    RE(ac->r22) = r11r -
        (MUL_R(temp2_r, temp2_r) + MUL_R(temp2_i, temp2_i)) +
        (MUL_R(temp4_r, temp4_r) + MUL_R(temp4_i, temp4_i));

#else

    temp2_r = QMF_RE(buffer[offset-2][bd]);
    temp2_i = QMF_IM(buffer[offset-2][bd]);
    temp3_r = QMF_RE(buffer[offset-1][bd]);
    temp3_i = QMF_IM(buffer[offset-1][bd]);
    // Save these because they are needed after the loop
    temp4_r = temp2_r;
    temp4_i = temp2_i;
    temp5_r = temp3_r;
    temp5_i = temp3_i;

    for (j = offset; j < len + offset; j++)
    {
        temp1_r = temp2_r; // temp1_r = QMF_RE(buffer[j-2][bd]);
        temp1_i = temp2_i; // temp1_i = QMF_IM(buffer[j-2][bd]);
        temp2_r = temp3_r; // temp2_r = QMF_RE(buffer[j-1][bd]);
        temp2_i = temp3_i; // temp2_i = QMF_IM(buffer[j-1][bd]);
        temp3_r = QMF_RE(buffer[j][bd]);
        temp3_i = QMF_IM(buffer[j][bd]);
        r01r += temp3_r * temp2_r + temp3_i * temp2_i;
        r01i += temp3_i * temp2_r - temp3_r * temp2_i;
        r02r += temp3_r * temp1_r + temp3_i * temp1_i;
        r02i += temp3_i * temp1_r - temp3_r * temp1_i;
        r11r += temp2_r * temp2_r + temp2_i * temp2_i;
    }

    // These are the actual values in the temporary variables at this point
    // temp1_r = QMF_RE(buffer[len+offset-1-2][bd]);
    // temp1_i = QMF_IM(buffer[len+offset-1-2][bd]);
    // temp2_r = QMF_RE(buffer[len+offset-1-1][bd]);
    // temp2_i = QMF_IM(buffer[len+offset-1-1][bd]);
    // temp3_r = QMF_RE(buffer[len+offset-1][bd]);
    // temp3_i = QMF_IM(buffer[len+offset-1][bd]);
    // temp4_r = QMF_RE(buffer[offset-2][bd]);
    // temp4_i = QMF_IM(buffer[offset-2][bd]);
    // temp5_r = QMF_RE(buffer[offset-1][bd]);
    // temp5_i = QMF_IM(buffer[offset-1][bd]);

    RE(ac->r12) = r01r -
        (temp3_r * temp2_r + temp3_i * temp2_i) +
        (temp5_r * temp4_r + temp5_i * temp4_i);
    IM(ac->r12) = r01i -
        (temp3_i * temp2_r - temp3_r * temp2_i) +
        (temp5_i * temp4_r - temp5_r * temp4_i);
    RE(ac->r22) = r11r -
        (temp2_r * temp2_r + temp2_i * temp2_i) +
        (temp4_r * temp4_r + temp4_i * temp4_i);

#endif

    RE(ac->r01) = r01r;
    IM(ac->r01) = r01i;
    RE(ac->r02) = r02r;
    IM(ac->r02) = r02i;
    RE(ac->r11) = r11r;

    ac->det = MUL_R(RE(ac->r11), RE(ac->r22)) - MUL_F(rel, (MUL_R(RE(ac->r12), RE(ac->r12)) + MUL_R(IM(ac->r12), IM(ac->r12))));
}
#endif

/* calculate linear prediction coefficients using the covariance method */
#ifndef SBR_LOW_POWER
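/*
 * The covariance method solves the 2x2 normal equations for a second order
 * complex predictor of one subband; with det = r11*r22 - rel*|r12|^2 the
 * code below evaluates
 *
 *   alpha_1 = (r01*r12 - r02*r11) / det
 *   alpha_0 = -(r01 + alpha_1*conj(r12)) / r11
 *
 * and zeroes the coefficients when det or r11 is zero, or when either
 * coefficient's squared magnitude reaches 16 (an unstable predictor).
 */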
static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                                 complex_t *alpha_0, complex_t *alpha_1, uint8_t k)
{
    real_t tmp;
    acorr_coef ac;

    auto_correlation(sbr, &ac, Xlow, k, sbr->numTimeSlotsRate + 6);

    if (ac.det == 0)
    {
        RE(alpha_1[k]) = 0;
        IM(alpha_1[k]) = 0;
    } else {
#ifdef FIXED_POINT
        tmp = (MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(IM(ac.r01), IM(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11)));
        RE(alpha_1[k]) = DIV_R(tmp, ac.det);
        tmp = (MUL_R(IM(ac.r01), RE(ac.r12)) + MUL_R(RE(ac.r01), IM(ac.r12)) - MUL_R(IM(ac.r02), RE(ac.r11)));
        IM(alpha_1[k]) = DIV_R(tmp, ac.det);
#else
        tmp = REAL_CONST(1.0) / ac.det;
        RE(alpha_1[k]) = (MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(IM(ac.r01), IM(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11))) * tmp;
        IM(alpha_1[k]) = (MUL_R(IM(ac.r01), RE(ac.r12)) + MUL_R(RE(ac.r01), IM(ac.r12)) - MUL_R(IM(ac.r02), RE(ac.r11))) * tmp;
#endif
    }

    if (RE(ac.r11) == 0)
    {
        RE(alpha_0[k]) = 0;
        IM(alpha_0[k]) = 0;
    } else {
#ifdef FIXED_POINT
        tmp = -(RE(ac.r01) + MUL_R(RE(alpha_1[k]), RE(ac.r12)) + MUL_R(IM(alpha_1[k]), IM(ac.r12)));
        RE(alpha_0[k]) = DIV_R(tmp, RE(ac.r11));
        tmp = -(IM(ac.r01) + MUL_R(IM(alpha_1[k]), RE(ac.r12)) - MUL_R(RE(alpha_1[k]), IM(ac.r12)));
        IM(alpha_0[k]) = DIV_R(tmp, RE(ac.r11));
#else
        tmp = 1.0f / RE(ac.r11);
        RE(alpha_0[k]) = -(RE(ac.r01) + MUL_R(RE(alpha_1[k]), RE(ac.r12)) + MUL_R(IM(alpha_1[k]), IM(ac.r12))) * tmp;
        IM(alpha_0[k]) = -(IM(ac.r01) + MUL_R(IM(alpha_1[k]), RE(ac.r12)) - MUL_R(RE(alpha_1[k]), IM(ac.r12))) * tmp;
#endif
    }

    if ((MUL_R(RE(alpha_0[k]),RE(alpha_0[k])) + MUL_R(IM(alpha_0[k]),IM(alpha_0[k])) >= REAL_CONST(16)) ||
        (MUL_R(RE(alpha_1[k]),RE(alpha_1[k])) + MUL_R(IM(alpha_1[k]),IM(alpha_1[k])) >= REAL_CONST(16)))
    {
        RE(alpha_0[k]) = 0;
        IM(alpha_0[k]) = 0;
        RE(alpha_1[k]) = 0;
        IM(alpha_1[k]) = 0;
    }
}
#else
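/*
 * Low power variant: the QMF data is real valued, so only the real parts of
 * the covariance terms are used and the second order predictor is solved as
 *
 *   alpha_0 = (r01*r22 - r12*r02) / (-det)
 *   alpha_1 = (r01*r12 - r02*r11) / det
 *
 * with both coefficients cleared when either reaches 4.  In addition the
 * first order reflection coefficient rxx = -r01/r11, clamped to [-1, 1], is
 * kept per band for the aliasing degree estimate below.
 */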
static void calc_prediction_coef_lp(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                                    complex_t *alpha_0, complex_t *alpha_1, real_t *rxx)
{
    uint8_t k;
    real_t tmp;
    acorr_coef ac;

    for (k = 1; k < sbr->f_master[0]; k++)
    {
        auto_correlation(sbr, &ac, Xlow, k, sbr->numTimeSlotsRate + 6);

        if (ac.det == 0)
        {
            RE(alpha_0[k]) = 0;
            RE(alpha_1[k]) = 0;
        } else {
            tmp = MUL_R(RE(ac.r01), RE(ac.r22)) - MUL_R(RE(ac.r12), RE(ac.r02));
            RE(alpha_0[k]) = DIV_R(tmp, (-ac.det));

            tmp = MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11));
            RE(alpha_1[k]) = DIV_R(tmp, ac.det);
        }

        if ((RE(alpha_0[k]) >= REAL_CONST(4)) || (RE(alpha_1[k]) >= REAL_CONST(4)))
        {
            RE(alpha_0[k]) = REAL_CONST(0);
            RE(alpha_1[k]) = REAL_CONST(0);
        }

        /* reflection coefficient */
        if (RE(ac.r11) == 0)
        {
            rxx[k] = COEF_CONST(0.0);
        } else {
            rxx[k] = DIV_C(RE(ac.r01), RE(ac.r11));
            rxx[k] = -rxx[k];
            if (rxx[k] > COEF_CONST(1.0)) rxx[k] = COEF_CONST(1.0);
            if (rxx[k] < COEF_CONST(-1.0)) rxx[k] = COEF_CONST(-1.0);
        }
    }
}
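
/*
 * deg[k] estimates how strongly band k aliases with its neighbour in the
 * real-valued low power QMF bank: when the reflection coefficients rxx of
 * adjacent bands alternate sign in the pattern checked below, deg[k] (or
 * deg[k-1]) is set to 1.0 or to 1 - rxx[k-1]^2, so that the later envelope
 * adjustment stage can attenuate the affected bands.
 */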
static void calc_aliasing_degree(sbr_info *sbr, real_t *rxx, real_t *deg)
{
    uint8_t k;

    rxx[0] = COEF_CONST(0.0);
    deg[1] = COEF_CONST(0.0);

    for (k = 2; k < sbr->k0; k++)
    {
        deg[k] = 0.0;

        if ((k % 2 == 0) && (rxx[k] < COEF_CONST(0.0)))
        {
            if (rxx[k-1] < 0.0)
            {
                deg[k] = COEF_CONST(1.0);

                if (rxx[k-2] > COEF_CONST(0.0))
                {
                    deg[k-1] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
                }
            } else if (rxx[k-2] > COEF_CONST(0.0)) {
                deg[k] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
            }
        }

        if ((k % 2 == 1) && (rxx[k] > COEF_CONST(0.0)))
        {
            if (rxx[k-1] > COEF_CONST(0.0))
            {
                deg[k] = COEF_CONST(1.0);

                if (rxx[k-2] < COEF_CONST(0.0))
                {
                    deg[k-1] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
                }
            } else if (rxx[k-2] < COEF_CONST(0.0)) {
                deg[k] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
            }
        }
    }
}
#endif

/* FIXED POINT: bwArray = COEF */
static real_t mapNewBw(uint8_t invf_mode, uint8_t invf_mode_prev)
{
    switch (invf_mode)
    {
    case 1: /* LOW */
        if (invf_mode_prev == 0) /* NONE */
            return COEF_CONST(0.6);
        else
            return COEF_CONST(0.75);

    case 2: /* MID */
        return COEF_CONST(0.9);

    case 3: /* HIGH */
        return COEF_CONST(0.98);

    default: /* NONE */
        if (invf_mode_prev == 1) /* LOW */
            return COEF_CONST(0.6);
        else
            return COEF_CONST(0.0);
    }
}
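
/*
 * calc_chirp_factors() smooths each band's chirp factor towards the new
 * target from mapNewBw(): bw = 0.75*bw_new + 0.25*bw_prev when the factor is
 * falling, bw = 0.90625*bw_new + 0.09375*bw_prev when it is rising, then
 * clamps the result to at most 0.99609375 and snaps values below 0.015625
 * to zero.
 */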
/* FIXED POINT: bwArray = COEF */
static void calc_chirp_factors(sbr_info *sbr, uint8_t ch)
{
    uint8_t i;

    for (i = 0; i < sbr->N_Q; i++)
    {
        sbr->bwArray[ch][i] = mapNewBw(sbr->bs_invf_mode[ch][i], sbr->bs_invf_mode_prev[ch][i]);

        if (sbr->bwArray[ch][i] < sbr->bwArray_prev[ch][i])
            sbr->bwArray[ch][i] = MUL_F(sbr->bwArray[ch][i], FRAC_CONST(0.75)) + MUL_F(sbr->bwArray_prev[ch][i], FRAC_CONST(0.25));
        else
            sbr->bwArray[ch][i] = MUL_F(sbr->bwArray[ch][i], FRAC_CONST(0.90625)) + MUL_F(sbr->bwArray_prev[ch][i], FRAC_CONST(0.09375));

        if (sbr->bwArray[ch][i] < COEF_CONST(0.015625))
            sbr->bwArray[ch][i] = COEF_CONST(0.0);

        if (sbr->bwArray[ch][i] >= COEF_CONST(0.99609375))
            sbr->bwArray[ch][i] = COEF_CONST(0.99609375);

        sbr->bwArray_prev[ch][i] = sbr->bwArray[ch][i];
        sbr->bs_invf_mode_prev[ch][i] = sbr->bs_invf_mode[ch][i];
    }
}
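
/*
 * patch_construction() (re)builds the patch table when the SBR header is
 * reset: each patch i copies patchNoSubbands[i] consecutive low-band
 * subbands, starting at patchStartSubband[i], onto high-band targets stacked
 * upwards from kx.  goalSb, looked up per sample rate, is the highest
 * subband a single patch should aim for, roughly 2.048e6 / sbr->sample_rate
 * (see the comment next to goalSbTab below).
 */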
static void patch_construction(sbr_info *sbr)
{
    uint8_t i, k;
    uint8_t odd, sb;
    uint8_t msb = sbr->k0;
    uint8_t usb = sbr->kx;
    uint8_t goalSbTab[] = { 21, 23, 32, 43, 46, 64, 85, 93, 128, 0, 0, 0 };
    /* (uint8_t)(2.048e6/sbr->sample_rate + 0.5); */
    uint8_t goalSb = goalSbTab[get_sr_index(sbr->sample_rate)];

    sbr->noPatches = 0;

    if (goalSb < (sbr->kx + sbr->M))
    {
        for (i = 0, k = 0; sbr->f_master[i] < goalSb; i++)
            k = i+1;
    } else {
        k = sbr->N_master;
    }

    if (sbr->N_master == 0)
    {
        sbr->noPatches = 0;
        sbr->patchNoSubbands[0] = 0;
        sbr->patchStartSubband[0] = 0;

        return;
    }

    do
    {
        uint8_t j = k + 1;

        do
        {
            j--;

            sb = sbr->f_master[j];
            odd = (sb - 2 + sbr->k0) % 2;
        } while (sb > (sbr->k0 - 1 + msb - odd));

        sbr->patchNoSubbands[sbr->noPatches] = max(sb - usb, 0);
        sbr->patchStartSubband[sbr->noPatches] = sbr->k0 - odd -
            sbr->patchNoSubbands[sbr->noPatches];

        if (sbr->patchNoSubbands[sbr->noPatches] > 0)
        {
            usb = sb;
            msb = sb;
            sbr->noPatches++;
        } else {
            msb = sbr->kx;
        }

        if (sbr->f_master[k] - sb < 3)
            k = sbr->N_master;
    } while (sb != (sbr->kx + sbr->M));

    if ((sbr->patchNoSubbands[sbr->noPatches-1] < 3) && (sbr->noPatches > 1))
    {
        sbr->noPatches--;
    }

    sbr->noPatches = min(sbr->noPatches, 5);
}

#endif