2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
3 ** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 ** Any non-GPL usage of this software or parts of this software is strictly
22 ** Commercial non-GPL licensing of this software is possible.
23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
38 #include "sbr_qmf_c.h"
39 #include "sbr_syntax.h"
/* Scaling applied around the analysis/synthesis transforms.
 *
 * Two complete sets are visible below: the first (integer shifts) is the
 * fixed-point build, the second (float divides/multiplies) the floating-
 * point build.  NOTE(review): in this extract both sets appear
 * back-to-back; upstream they are separated by an `#ifdef FIXED_POINT`
 * / `#else` / `#endif` that is missing from this view -- confirm against
 * the complete file, otherwise the second set redefines the first.
 */
#define FAAD_SYNTHESIS_SCALE(X) ((X)>>1)   /* fixed point: halve synthesis output */
#define FAAD_ANALYSIS_SCALE1(X) ((X)>>4)   /* fixed point: pre-scale windowed sum by 1/16 */
#define FAAD_ANALYSIS_SCALE2(X) ((X))      /* fixed point: no post-DCT scaling */
#define FAAD_ANALYSIS_SCALE3(X) ((X))      /* fixed point: no pre-DCT scaling in LP synthesis */
#define FAAD_SYNTHESIS_SCALE(X) ((X)/64.0f) /* float: normalize by transform length 64 */
#define FAAD_ANALYSIS_SCALE1(X) ((X))       /* float: no pre-scaling */
#define FAAD_ANALYSIS_SCALE2(X) (2.0f*(X))  /* float: compensate transform gain */
#define FAAD_ANALYSIS_SCALE3(X) ((X)/32.0f) /* float: normalize by 32 subbands */
/* 32-band QMF analysis filterbank.
 *
 * For each of sbr->numTimeSlotsRate time slots, consumes 32 time-domain
 * samples from `input` and produces 32 subband samples, written into
 * X[l + offset][...].  qmfa->x is the persistent filter history kept as a
 * double ringbuffer (each sample stored twice, 320 apart) so a 320-tap
 * window is always contiguous and no memmove is needed; qmfa->x_index is
 * the current window start.  `kx` limits how many subbands are actually
 * non-zero in the complex (high-quality) path.
 *
 * NOTE(review): this extract is missing several original lines -- the
 * function braces, the declarations of `pX`, `in` and `n`, the
 * `#ifdef SBR_LOW_POWER` that opens the real-valued (DCT-III) path whose
 * `#else` is visible below, the `if (n < kx)` selection around the
 * low-power X writes, the `qmfa->x_index -= 32` ringbuffer step, and the
 * assignment of `pX` -- confirm against the complete file before use.
 */
void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input,
                         qmf_t X[MAX_NTSR][64], uint8_t offset, uint8_t kx)
    real_t u[64] MEM_ALIGN_ATTR;     /* windowed/summed intermediate, 64 values per slot */
    real_t real[32] MEM_ALIGN_ATTR;  /* DCT-IV real inputs/outputs (complex path) */
    real_t imag[32] MEM_ALIGN_ATTR;  /* DCT-IV imag inputs/outputs (complex path) */
    real_t y[32] MEM_ALIGN_ATTR;     /* DCT-III input (low-power path) */
    uint32_t l, idx0, idx1;

    for (l = 0; l < sbr->numTimeSlotsRate; l++)
        /* shift input buffer x */
        /* input buffer is not shifted anymore, x is implemented as double ringbuffer */
        //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));

        /* add new samples to input buffer x; each new sample is written to
         * both halves of the double ringbuffer (idx0 and idx0+320), newest
         * sample at the highest index of the 32-slot window */
        idx0 = qmfa->x_index + 31; idx1 = idx0 + 320;
        for (n = 0; n < 32; n+=4)   /* unrolled x4 */
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);
            qmfa->x[idx0--] = qmfa->x[idx1--] = (input[in++]);

        /* window and summation to create array u:
         * 5 taps per output, history strided by 64 samples; the qmf_c
         * coefficient table is read with stride 20 and even offsets
         * (presumably an interleaved layout -- see sbr_qmf_c.h) */
        for (n = 0; n < 32; n++)
            idx0 = qmfa->x_index + n; idx1 = n * 20;
            u[n] = FAAD_ANALYSIS_SCALE1(
                MUL_F(qmfa->x[idx0], qmf_c[idx1]) +
                MUL_F(qmfa->x[idx0 + 64], qmf_c[idx1 + 2]) +
                MUL_F(qmfa->x[idx0 + 128], qmf_c[idx1 + 4]) +
                MUL_F(qmfa->x[idx0 + 192], qmf_c[idx1 + 6]) +
                MUL_F(qmfa->x[idx0 + 256], qmf_c[idx1 + 8]));
        /* second half of u: same 5-tap window, coefficient index wraps
         * back into the table (n*20 - 639) */
        for (n = 32; n < 64; n++)
            idx0 = qmfa->x_index + n; idx1 = n * 20 - 639;
            u[n] = FAAD_ANALYSIS_SCALE1(
                MUL_F(qmfa->x[idx0], qmf_c[idx1]) +
                MUL_F(qmfa->x[idx0 + 64], qmf_c[idx1 + 2]) +
                MUL_F(qmfa->x[idx0 + 128], qmf_c[idx1 + 4]) +
                MUL_F(qmfa->x[idx0 + 192], qmf_c[idx1 + 6]) +
                MUL_F(qmfa->x[idx0 + 256], qmf_c[idx1 + 8]));

        /* update ringbuffer index (wrap back to the top of the buffer;
         * NOTE(review): the decrement itself is on a line missing here) */
        if (qmfa->x_index < 0)
            qmfa->x_index = (320-32);

        /* calculate 32 subband samples by introducing X */
        /* --- low-power path: real-valued output via 32-point DCT-III --- */
        for (n = 1; n < 16; n++)
            y[n] = u[n+48] + u[48-n];
        for (n = 16; n < 32; n++)
            y[n] = -u[n-16] + u[48-n];

        DCT3_32_unscaled(u, y);

        /* store subbands; NOTE(review): the `if (n < kx) ... else` that
         * selects between these two writes is missing from this extract */
        for (n = 0; n < 32; n++)
            QMF_RE(X[l + offset][n]) = FAAD_ANALYSIS_SCALE2(u[n]);
            QMF_RE(X[l + offset][n]) = 0;
#else /* #ifdef SBR_LOW_POWER */
        /* --- high-quality path: complex output via DCT-IV kernel --- */
        // Reordering of data moved from DCT_IV to here
        idx0 = 30; idx1 = 63;
        imag[31] = u[ 1]; real[ 0] = u[ 0];
        for (n = 1; n < 31; n+=3)   /* unrolled x3 */
            imag[idx0--] = u[n+1]; real[n] = -u[idx1--];
            imag[idx0--] = u[n+2]; real[n+1] = -u[idx1--];
            imag[idx0--] = u[n+3]; real[n+2] = -u[idx1--];
        imag[ 0] = u[32]; real[31] = -u[33];

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(real, imag);

        // Reordering of data moved from DCT_IV to here
        /* Step 1: Calculate all non-zero pairs (subbands below kx) */
        for (n = 0; n < kx/2; n++) {
            idx0 = 2*n; idx1 = idx0 + 1;
            QMF_RE(pX[idx0]) = FAAD_ANALYSIS_SCALE2( real[n]);
            QMF_IM(pX[idx0]) = FAAD_ANALYSIS_SCALE2( imag[n]);
            QMF_RE(pX[idx1]) = FAAD_ANALYSIS_SCALE2(-imag[31-n]);
            QMF_IM(pX[idx1]) = FAAD_ANALYSIS_SCALE2(-real[31-n]);

        /* Step 2: Calculate a single pair with half zero'ed (odd kx boundary) */
        idx0 = 2*n; idx1 = idx0 + 1;
        QMF_RE(pX[idx0]) = FAAD_ANALYSIS_SCALE2( real[n]);
        QMF_IM(pX[idx0]) = FAAD_ANALYSIS_SCALE2( imag[n]);
        QMF_RE(pX[idx1]) = QMF_IM(pX[idx1]) = 0;

        /* Step 3: All other are zero'ed (subbands at/above kx) */
        for (; n < 16; n++) {
            idx0 = 2*n; idx1 = idx0 + 1;
            QMF_RE(pX[idx0]) = QMF_IM(pX[idx0]) = 0;
            QMF_RE(pX[idx1]) = QMF_IM(pX[idx1]) = 0;
#endif /* #ifdef SBR_LOW_POWER */
/* 32-band QMF synthesis filterbank, low-power (real-valued) variant.
 *
 * Converts 32 real subband samples per time slot from X back into 32
 * time-domain output samples per slot.  qmfs->v is a 2*640-entry double
 * ringbuffer of synthesis history (every value written twice, 640 apart)
 * indexed by qmfs->v_index, so the 10-tap windowing below always reads a
 * contiguous region.
 *
 * NOTE(review): this extract is missing the function braces, the trailing
 * parameters of the signature (presumably `real_t *output`), the
 * declarations of `l`/`output` handling, the second transform call on `y`
 * after DCT2_16_unscaled, and the `qmfs->v_index -= 64` decrement before
 * the wrap check -- confirm against the complete file.
 */
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSR][64],
    real_t x[16] MEM_ALIGN_ATTR;   /* even-symmetric half (sum terms) */
    real_t y[16] MEM_ALIGN_ATTR;   /* odd-symmetric half (difference terms) */
    int16_t n, k, out = 0;         /* out: running index into the output buffer */

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples: split the 32 subbands into symmetric (x)
         * and antisymmetric (y) halves around subband 31-k */
        for (k = 0; k < 16; k++)
            y[k] = FAAD_ANALYSIS_SCALE3((QMF_RE(X[l][k]) - QMF_RE(X[l][31-k])));
            x[k] = FAAD_ANALYSIS_SCALE3((QMF_RE(X[l][k]) + QMF_RE(X[l][31-k])));

        /* in-place DCT-II of the symmetric half
         * (NOTE(review): the matching transform of y is on a missing line) */
        DCT2_16_unscaled(x, x);

        /* interleave transform outputs into the middle of the v window,
         * mirrored into both halves of the double ringbuffer */
        for (n = 8; n < 24; n++)
            qmfs->v[qmfs->v_index + n*2 ] = qmfs->v[qmfs->v_index + 640 + n*2 ] = x[n-8];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 640 + n*2+1] = y[n-8];

        /* exploit symmetry: first 16 entries mirror positions 32-n */
        for (n = 0; n < 16; n++)
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = qmfs->v[qmfs->v_index + 32-n];

        /* antisymmetric tail: center zero, then negated mirror of 48-n */
        qmfs->v[qmfs->v_index + 48] = qmfs->v[qmfs->v_index + 640 + 48] = 0;
        for (n = 1; n < 16; n++)
            qmfs->v[qmfs->v_index + 48+n] = qmfs->v[qmfs->v_index + 640 + 48+n] = -qmfs->v[qmfs->v_index + 48-n];

        /* calculate 32 output samples and window: 10 taps per sample,
         * history offsets alternate 96/32 apart, coefficients read with
         * stride 20 (2*k*10) from the interleaved qmf_c table */
        for (k = 0; k < 32; k++)
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[ 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[1 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[2 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[3 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[4 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[5 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[6 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[7 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[8 + 2*k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[9 + 2*k*10]);

        /* update the ringbuffer index (wrap; NOTE(review): the decrement
         * itself is on a line missing from this extract) */
        if (qmfs->v_index < 0)
            qmfs->v_index = (640-64);
/* 64-band QMF synthesis filterbank, low-power (real-valued) variant.
 *
 * Same structure as the 32-band low-power synthesis but with doubled
 * dimensions: 64 subbands in, 64 output samples per slot, a 2*1280-entry
 * double ringbuffer qmfs->v, and a 32-point DCT-II.
 *
 * NOTE(review): this extract is missing the function braces, the trailing
 * signature parameters (presumably `real_t *output`), the declaration of
 * `l`, and the matching transform of `y` after DCT2_32_unscaled -- confirm
 * against the complete file.
 */
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSR][64],
    real_t x[64] MEM_ALIGN_ATTR;   /* even-symmetric half (sum terms) */
    real_t y[64] MEM_ALIGN_ATTR;   /* odd-symmetric half (difference terms) */
    int16_t n, k, out = 0;         /* out: running index into the output buffer */

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples: split subbands into symmetric (x) and
         * antisymmetric (y) halves around subband 63-k */
        for (k = 0; k < 32; k++)
            y[k] = FAAD_ANALYSIS_SCALE3((QMF_RE(X[l][k]) - QMF_RE(X[l][63-k])));
            x[k] = FAAD_ANALYSIS_SCALE3((QMF_RE(X[l][k]) + QMF_RE(X[l][63-k])));

        /* in-place DCT-II of the symmetric half
         * (NOTE(review): the matching transform of y is on a missing line) */
        DCT2_32_unscaled(x, x);

        /* interleave transform outputs into the middle of the v window,
         * mirrored into both halves of the double ringbuffer */
        for (n = 16; n < 48; n++)
            qmfs->v[qmfs->v_index + n*2] = qmfs->v[qmfs->v_index + 1280 + n*2 ] = x[n-16];
            qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 1280 + n*2+1] = y[n-16];

        /* exploit symmetry: first 32 entries mirror positions 64-n */
        for (n = 0; n < 32; n++)
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 1280 + n] = qmfs->v[qmfs->v_index + 64-n];

        /* antisymmetric tail: center zero, then negated mirror of 96-n */
        qmfs->v[qmfs->v_index + 96] = qmfs->v[qmfs->v_index + 1280 + 96] = 0;
        for (n = 1; n < 32; n++)
            qmfs->v[qmfs->v_index + 96+n] = qmfs->v[qmfs->v_index + 1280 + 96+n] = -qmfs->v[qmfs->v_index + 96-n];

        /* calculate 64 output samples and window: 10 taps per sample,
         * history offsets alternate 192/64 apart, coefficients read with
         * stride 10 from qmf_c */
        for (k = 0; k < 64; k++)
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[ k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 192 + k], qmf_c[1 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[2 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 256 + 192 + k], qmf_c[3 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[4 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 512 + 192 + k], qmf_c[5 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + k], qmf_c[6 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 768 + 192 + k], qmf_c[7 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + k], qmf_c[8 + k*10]) +
                MUL_F(qmfs->v[qmfs->v_index + 1024 + 192 + k], qmf_c[9 + k*10]);

        /* update the ringbuffer index: step back one slot's worth and wrap */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280-128);
307 #else /* #ifdef SBR_LOW_POWER */
/* Complex pre-twiddle factors for the 32-band high-quality synthesis.
 * Entry k holds { cos(a), -sin(a) } with a = (2k+1)*pi/256, i.e. the
 * values match e^{-j*(2k+1)*pi/256} (presumably folding the DCT/DST pair
 * into one complex transform -- see FAAD_CMPLX_PRETWIDDLE_* below).
 * NOTE(review): the opening `{` and closing `};` of the initializer are
 * on lines missing from this extract. */
static const complex_t qmf32_pre_twiddle[] =
    { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
    { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
    { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
    { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
    { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
    { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
    { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
    { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
    { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
    { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
    { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
    { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
    { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
    { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
    { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
    { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
    { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
    { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
    { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
    { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
    { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
    { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
    { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
    { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
    { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
    { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
    { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
    { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
    { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
    { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
    { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
    { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
/* Complex pre-twiddle of subband sample X[l][k] by qmf32_pre_twiddle[k]:
 * SUB yields the real part (re*re - im*im), ADD the imaginary part
 * (im*re + re*im) of the complex product.  Both expand `X`, `l` and `k`
 * unhygienically, so they are only usable inside the synthesis loop that
 * declares those names. */
#define FAAD_CMPLX_PRETWIDDLE_SUB(k) \
    (MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - \
     MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k])))

#define FAAD_CMPLX_PRETWIDDLE_ADD(k) \
    (MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + \
     MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k])))
/* 32-band QMF synthesis filterbank, high-quality (complex) variant.
 *
 * Pre-twiddles the complex subband samples, transforms them, folds the
 * result into the 2*640-entry double ringbuffer qmfs->v, then windows 10
 * taps per output sample.
 *
 * NOTE(review): this extract is missing the function braces, the trailing
 * signature parameters (presumably `real_t *output`), the declaration of
 * `l`, the `for (k = 0; k < 32;)` loop header around the unrolled
 * pre-twiddle statements, and the transform calls on x1/x2 that upstream
 * places between the pre-twiddle and the v-buffer fold -- confirm against
 * the complete file.
 */
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSR][64],
    real_t x1[32] MEM_ALIGN_ATTR;  /* pre-twiddled real parts */
    real_t x2[32] MEM_ALIGN_ATTR;  /* pre-twiddled imag parts */
    int32_t n, k, idx0, idx1, out = 0;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
        /* buffer is not shifted, we are using a ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        /* complex pre-twiddle, unrolled x4 (loop header on missing line) */
        x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
        x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
        x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;
        x1[k] = FAAD_CMPLX_PRETWIDDLE_SUB(k); x2[k] = FAAD_CMPLX_PRETWIDDLE_ADD(k); k++;

        /* fold transform outputs into both halves of the double ringbuffer,
         * writing the window from both ends toward the middle */
        idx0 = qmfs->v_index;
        idx1 = qmfs->v_index + 63;
        for (n = 0; n < 32; n+=2)   /* unrolled x2 */
            qmfs->v[idx0] = qmfs->v[idx0 + 640] = -x1[n] + x2[n]; idx0++;
            qmfs->v[idx1] = qmfs->v[idx1 + 640] = x1[n] + x2[n]; idx1--;
            qmfs->v[idx0] = qmfs->v[idx0 + 640] = -x1[n+1] + x2[n+1]; idx0++;
            qmfs->v[idx1] = qmfs->v[idx1 + 640] = x1[n+1] + x2[n+1]; idx1--;

        /* calculate 32 output samples and window: 10 taps, history offsets
         * alternating 96/32 apart, coefficient stride 20 (2*k*10) */
        for (k = 0; k < 32; k++)
            idx0 = qmfs->v_index + k; idx1 = 2*k*10;
            output[out++] = FAAD_SYNTHESIS_SCALE(
                MUL_F(qmfs->v[idx0], qmf_c[idx1]) +
                MUL_F(qmfs->v[idx0 + 96], qmf_c[idx1+1]) +
                MUL_F(qmfs->v[idx0 + 128], qmf_c[idx1+2]) +
                MUL_F(qmfs->v[idx0 + 224], qmf_c[idx1+3]) +
                MUL_F(qmfs->v[idx0 + 256], qmf_c[idx1+4]) +
                MUL_F(qmfs->v[idx0 + 352], qmf_c[idx1+5]) +
                MUL_F(qmfs->v[idx0 + 384], qmf_c[idx1+6]) +
                MUL_F(qmfs->v[idx0 + 480], qmf_c[idx1+7]) +
                MUL_F(qmfs->v[idx0 + 512], qmf_c[idx1+8]) +
                MUL_F(qmfs->v[idx0 + 608], qmf_c[idx1+9]));

        /* update ringbuffer index (wrap; NOTE(review): the decrement itself
         * is on a line missing from this extract) */
        if (qmfs->v_index < 0)
            qmfs->v_index = (640 - 64);
/* 64-band QMF synthesis filterbank, high-quality (complex) variant.
 *
 * De-interleaves the complex subband samples into two DCT-IV kernels,
 * folds the results into the 2*1280-entry double ringbuffer qmfs->v, then
 * windows 10 taps per output sample.  The windowing loop has hand-written
 * ARM and ColdFire EMAC inline-assembly fast paths plus a portable C
 * fallback.
 *
 * NOTE(review): this extract is missing the function braces, the trailing
 * signature parameters (presumably `real_t *output`), the declarations of
 * `l` and `pX`, the `#ifdef CPU_ARM` opener and the `asm volatile(` lines
 * that start both asm statements, the input-operand line binding
 * `[qtab]`, the `#else` before the C fallback, and the `idx0 = k*10`
 * setup of the fallback loop -- confirm against the complete file.
 */
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSR][64],
    real_t real1[32] MEM_ALIGN_ATTR;  /* DCT-IV kernel 1, real part */
    real_t imag1[32] MEM_ALIGN_ATTR;  /* DCT-IV kernel 1, imag part */
    real_t real2[32] MEM_ALIGN_ATTR;  /* DCT-IV kernel 2, real part */
    real_t imag2[32] MEM_ALIGN_ATTR;  /* DCT-IV kernel 2, imag part */

    real_t *p_buf_1, *p_buf_3;        /* current v window and its +1280 mirror */
    int32_t n, k, idx0, idx1, out = 0;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++)
        /* buffer is not shifted, we use double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples: split even/odd subbands across the two
         * kernels, with the odd subbands mirrored (31-k) */
        for (k = 0; k < 32; k++)
            idx0 = 2*k; idx1 = idx0+1;
            real1[ k] = QMF_RE(pX[idx0]); imag2[ k] = QMF_IM(pX[idx0]);
            imag1[31-k] = QMF_RE(pX[idx1]); real2[31-k] = QMF_IM(pX[idx1]);

        // dct4_kernel is DCT_IV without reordering which is done before and after FFT
        dct4_kernel(real1, imag1);
        dct4_kernel(real2, imag2);

        /* fold kernel outputs into both halves of the double ringbuffer,
         * writing the 128-wide window from both ends toward the middle */
        p_buf_1 = qmfs->v + qmfs->v_index;
        p_buf_3 = p_buf_1 + 1280;

        idx0 = 0; idx1 = 127;
        for (n = 0; n < 32; n++)
            p_buf_1[idx0] = p_buf_3[idx0] = real2[ n] - real1[ n]; idx0++;
            p_buf_1[idx1] = p_buf_3[idx1] = real2[ n] + real1[ n]; idx1--;
            p_buf_1[idx0] = p_buf_3[idx0] = imag2[31-n] + imag1[31-n]; idx0++;
            p_buf_1[idx1] = p_buf_3[idx1] = imag2[31-n] - imag1[31-n]; idx1--;

        p_buf_1 = qmfs->v + qmfs->v_index;

        /* calculate 64 output samples and window */
        /* ARM fast path: 10-tap MAC per output via smull/smlal, history
         * offsets 0,192,256,448,512,704,768,960,1024,1216 words
         * (NOTE(review): the `asm volatile(` opener and [qtab] operand
         * binding are on missing lines) */
        const real_t *qtab = qmf_c;
        real_t *pbuf = p_buf_1;
        for (k = 0; k < 64; k++, pbuf++)
            real_t *pout = &output[out++];
            "ldmia %[qtab]!, { r0-r3 } \n\t"
            "ldr r4, [%[pbuf]] \n\t"
            "ldr r7, [%[pbuf], #192*4] \n\t"
            "smull r5, r6, r4, r0 \n\t"
            "ldr r4, [%[pbuf], #256*4] \n\t"
            "smlal r5, r6, r7, r1 \n\t"
            "ldr r7, [%[pbuf], #448*4] \n\t"
            "smlal r5, r6, r4, r2 \n\t"
            "ldr r4, [%[pbuf], #512*4] \n\t"
            "smlal r5, r6, r7, r3 \n\t"
            "ldmia %[qtab]!, { r0-r3 } \n\t"
            "ldr r7, [%[pbuf], #704*4] \n\t"
            "smlal r5, r6, r4, r0 \n\t"
            "ldr r4, [%[pbuf], #768*4] \n\t"
            "smlal r5, r6, r7, r1 \n\t"
            "ldr r7, [%[pbuf], #960*4] \n\t"
            "smlal r5, r6, r4, r2 \n\t"
            "mov r2, #1024*4 \n\t"
            "ldmia %[qtab]!, { r0-r1 } \n\t"
            "ldr r4, [%[pbuf], r2] \n\t"
            "smlal r5, r6, r7, r3 \n\t"
            "mov r2, #1216*4 \n\t"
            "ldr r7, [%[pbuf], r2] \n\t"
            "smlal r5, r6, r4, r0 \n\t"
            "smlal r5, r6, r7, r1 \n\t"
            "str r6, [%[pout]] \n"
            : [pbuf] "r" (pbuf), [pout] "r" (pout)
            : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "memory");
#elif defined CPU_COLDFIRE
        /* ColdFire EMAC fast path: same 10-tap MAC using acc0
         * (NOTE(review): the `asm volatile(` opener and operand lines are
         * on missing lines) */
        const real_t *qtab = qmf_c;
        real_t *pbuf = p_buf_1;
        for (k = 0; k < 64; k++, pbuf++)
            real_t *pout = &output[out++];
            "move.l (%[pbuf]), %%d5 \n"
            "movem.l (%[qtab]), %%d0-%%d4 \n"
            "mac.l %%d0, %%d5, (192*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d1, %%d5, (256*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d2, %%d5, (448*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d3, %%d5, (512*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d4, %%d5, (704*4, %[pbuf]), %%d5, %%acc0 \n"
            "lea.l (20, %[qtab]), %[qtab] \n"
            "movem.l (%[qtab]), %%d0-%%d4 \n"
            "mac.l %%d0, %%d5, (768*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d1, %%d5, (960*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d2, %%d5, (1024*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d3, %%d5, (1216*4, %[pbuf]), %%d5, %%acc0 \n"
            "mac.l %%d4, %%d5, %%acc0 \n"
            "lea.l (20, %[qtab]), %[qtab] \n"
            "movclr.l %%acc0, %%d0 \n"
            "move.l %%d0, (%[pout]) \n"
            : "d0", "d1", "d2", "d3", "d4", "d5", "memory");
        /* portable C fallback: same 10 taps and offsets as the asm paths
         * (NOTE(review): the `#else` and the `idx0 = k*10` setup are on
         * missing lines) */
        for (k = 0; k < 64; k++)
            output[out++] = FAAD_SYNTHESIS_SCALE(
                MUL_F(p_buf_1[k], qmf_c[idx0]) +
                MUL_F(p_buf_1[k + 192 ], qmf_c[idx0+1]) +
                MUL_F(p_buf_1[k + 256 ], qmf_c[idx0+2]) +
                MUL_F(p_buf_1[k + 256+192], qmf_c[idx0+3]) +
                MUL_F(p_buf_1[k + 512 ], qmf_c[idx0+4]) +
                MUL_F(p_buf_1[k + 512+192], qmf_c[idx0+5]) +
                MUL_F(p_buf_1[k + 768 ], qmf_c[idx0+6]) +
                MUL_F(p_buf_1[k + 768+192], qmf_c[idx0+7]) +
                MUL_F(p_buf_1[k +1024 ], qmf_c[idx0+8]) +
                MUL_F(p_buf_1[k +1024+192], qmf_c[idx0+9]));

        /* update ringbuffer index: step back one slot's worth and wrap */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0)
            qmfs->v_index = (1280 - 128);
559 #endif /* #ifdef SBR_LOW_POWER */
561 #endif /* #ifdef SBR_DEC */