/*
 * imdct.c
 * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * The ifft algorithms in this file have been largely inspired by Dan
 * Bernstein's work, djbfft, available at http://cr.yp.to/djbfft.html
 *
 * This file is part of a52dec, a free ATSC A-52 stream decoder.
 * See http://liba52.sourceforge.net/ for updates.
 *
 * Modified for use with MPlayer, changes contained in liba52_changes.diff.
 * Detailed changelog at http://svn.mplayerhq.hu/mplayer/trunk/
 *
 * a52dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * a52dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * SSE optimizations from Michael Niedermayer (michaelni@gmx.at)
 * 3DNOW optimizations from Nick Kurshev <nickols_k@mail.ru>
 *   michael did port them from libac3 (untested, perhaps totally broken)
 * AltiVec optimizations from Romain Dolbeau (romain@dolbeau.org)
 */
#include <math.h>
#include <stdio.h>
#include <inttypes.h>

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif

#include "a52_internal.h"
void (* a52_imdct_512) (sample_t * data, sample_t * delay, sample_t bias);

#ifdef RUNTIME_CPUDETECT
typedef struct complex_s {
    sample_t real;
    sample_t imag;
} complex_t;
static const int pm128[128] attribute_used __attribute__((aligned(16))) =
{
    0, 16, 32, 48, 64, 80,  96, 112,  8, 40, 72, 104, 24, 56,  88, 120,
    4, 20, 36, 52, 68, 84, 100, 116, 12, 28, 44,  60, 76, 92, 108, 124,
    2, 18, 34, 50, 66, 82,  98, 114, 10, 42, 74, 106, 26, 58,  90, 122,
    6, 22, 38, 54, 70, 86, 102, 118, 14, 46, 78, 110, 30, 62,  94, 126,
    1, 17, 33, 49, 65, 81,  97, 113,  9, 41, 73, 105, 25, 57,  89, 121,
    5, 21, 37, 53, 69, 85, 101, 117, 13, 29, 45,  61, 77, 93, 109, 125,
    3, 19, 35, 51, 67, 83,  99, 115, 11, 43, 75, 107, 27, 59,  91, 123,
    7, 23, 39, 55, 71, 87, 103, 119, 15, 31, 47,  63, 79, 95, 111, 127
};
static uint8_t attribute_used bit_reverse_512[] = {
    0x00, 0x40, 0x20, 0x60, 0x10, 0x50, 0x30, 0x70,
    0x08, 0x48, 0x28, 0x68, 0x18, 0x58, 0x38, 0x78,
    0x04, 0x44, 0x24, 0x64, 0x14, 0x54, 0x34, 0x74,
    0x0c, 0x4c, 0x2c, 0x6c, 0x1c, 0x5c, 0x3c, 0x7c,
    0x02, 0x42, 0x22, 0x62, 0x12, 0x52, 0x32, 0x72,
    0x0a, 0x4a, 0x2a, 0x6a, 0x1a, 0x5a, 0x3a, 0x7a,
    0x06, 0x46, 0x26, 0x66, 0x16, 0x56, 0x36, 0x76,
    0x0e, 0x4e, 0x2e, 0x6e, 0x1e, 0x5e, 0x3e, 0x7e,
    0x01, 0x41, 0x21, 0x61, 0x11, 0x51, 0x31, 0x71,
    0x09, 0x49, 0x29, 0x69, 0x19, 0x59, 0x39, 0x79,
    0x05, 0x45, 0x25, 0x65, 0x15, 0x55, 0x35, 0x75,
    0x0d, 0x4d, 0x2d, 0x6d, 0x1d, 0x5d, 0x3d, 0x7d,
    0x03, 0x43, 0x23, 0x63, 0x13, 0x53, 0x33, 0x73,
    0x0b, 0x4b, 0x2b, 0x6b, 0x1b, 0x5b, 0x3b, 0x7b,
    0x07, 0x47, 0x27, 0x67, 0x17, 0x57, 0x37, 0x77,
    0x0f, 0x4f, 0x2f, 0x6f, 0x1f, 0x5f, 0x3f, 0x7f};
static uint8_t fftorder[] = {
      0,128, 64,192, 32,160,224, 96, 16,144, 80,208,240,112, 48,176,
      8,136, 72,200, 40,168,232,104,248,120, 56,184, 24,152,216, 88,
      4,132, 68,196, 36,164,228,100, 20,148, 84,212,244,116, 52,180,
    252,124, 60,188, 28,156,220, 92, 12,140, 76,204,236,108, 44,172,
      2,130, 66,194, 34,162,226, 98, 18,146, 82,210,242,114, 50,178,
     10,138, 74,202, 42,170,234,106,250,122, 58,186, 26,154,218, 90,
    254,126, 62,190, 30,158,222, 94, 14,142, 78,206,238,110, 46,174,
      6,134, 70,198, 38,166,230,102,246,118, 54,182, 22,150,214, 86
};
static complex_t __attribute__((aligned(16))) buf[128];

/* Twiddle factor LUT */
static complex_t __attribute__((aligned(16))) w_1[1];
static complex_t __attribute__((aligned(16))) w_2[2];
static complex_t __attribute__((aligned(16))) w_4[4];
static complex_t __attribute__((aligned(16))) w_8[8];
static complex_t __attribute__((aligned(16))) w_16[16];
static complex_t __attribute__((aligned(16))) w_32[32];
static complex_t __attribute__((aligned(16))) w_64[64];
static complex_t __attribute__((aligned(16))) * w[7] = {w_1, w_2, w_4, w_8, w_16, w_32, w_64};

/* Twiddle factors for IMDCT */
static sample_t __attribute__((aligned(16))) xcos1[128];
static sample_t __attribute__((aligned(16))) xsin1[128];
#if defined(ARCH_X86) || defined(ARCH_X86_64)
// NOTE: SSE needs 16byte alignment or it will segfault
static float __attribute__((aligned(16))) sseSinCos1c[256];
static float __attribute__((aligned(16))) sseSinCos1d[256];
static float attribute_used __attribute__((aligned(16))) ps111_1[4]={1,1,1,-1};
//static float __attribute__((aligned(16))) sseW0[4];
static float __attribute__((aligned(16))) sseW1[8];
static float __attribute__((aligned(16))) sseW2[16];
static float __attribute__((aligned(16))) sseW3[32];
static float __attribute__((aligned(16))) sseW4[64];
static float __attribute__((aligned(16))) sseW5[128];
static float __attribute__((aligned(16))) sseW6[256];
static float __attribute__((aligned(16))) *sseW[7]=
	{NULL /*sseW0*/,sseW1,sseW2,sseW3,sseW4,sseW5,sseW6};
static float __attribute__((aligned(16))) sseWindow[512];
#endif
/* Root values for IFFT */
static sample_t roots16[3];
static sample_t roots32[7];
static sample_t roots64[15];
static sample_t roots128[31];

/* Twiddle factors for IMDCT */
static complex_t pre1[128];
static complex_t post1[64];
static complex_t pre2[64];
static complex_t post2[32];

static sample_t a52_imdct_window[256];

static void (* ifft128) (complex_t * buf);
static void (* ifft64) (complex_t * buf);
static inline void ifft2 (complex_t * buf)
{
    double r, i;

    r = buf[0].real;
    i = buf[0].imag;
    buf[0].real += buf[1].real;
    buf[0].imag += buf[1].imag;
    buf[1].real = r - buf[1].real;
    buf[1].imag = i - buf[1].imag;
}
static inline void ifft4 (complex_t * buf)
{
    double tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;

    tmp1 = buf[0].real + buf[1].real;
    tmp2 = buf[3].real + buf[2].real;
    tmp3 = buf[0].imag + buf[1].imag;
    tmp4 = buf[2].imag + buf[3].imag;
    tmp5 = buf[0].real - buf[1].real;
    tmp6 = buf[0].imag - buf[1].imag;
    tmp7 = buf[2].imag - buf[3].imag;
    tmp8 = buf[3].real - buf[2].real;

    buf[0].real = tmp1 + tmp2;
    buf[0].imag = tmp3 + tmp4;
    buf[2].real = tmp1 - tmp2;
    buf[2].imag = tmp3 - tmp4;
    buf[1].real = tmp5 + tmp7;
    buf[1].imag = tmp6 + tmp8;
    buf[3].real = tmp5 - tmp7;
    buf[3].imag = tmp6 - tmp8;
}
/* the basic split-radix ifft butterfly */

#define BUTTERFLY(a0,a1,a2,a3,wr,wi) do {	\
    tmp5 = a2.real * wr + a2.imag * wi;		\
    tmp6 = a2.imag * wr - a2.real * wi;		\
    tmp7 = a3.real * wr - a3.imag * wi;		\
    tmp8 = a3.imag * wr + a3.real * wi;		\
    tmp1 = tmp5 + tmp7;				\
    tmp2 = tmp6 + tmp8;				\
    tmp3 = tmp6 - tmp8;				\
    tmp4 = tmp7 - tmp5;				\
    a2.real = a0.real - tmp1;			\
    a2.imag = a0.imag - tmp2;			\
    a3.real = a1.real - tmp3;			\
    a3.imag = a1.imag - tmp4;			\
    a0.real += tmp1;				\
    a0.imag += tmp2;				\
    a1.real += tmp3;				\
    a1.imag += tmp4;				\
} while (0)
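/*
 * In complex terms, with w = wr + j*wi, the macro computes
 *     t1 = conj(w)*a2 + w*a3            (tmp1 + j*tmp2)
 *     t2 = -j * (conj(w)*a2 - w*a3)     (tmp3 + j*tmp4)
 * and then a2 = a0 - t1, a3 = a1 - t2, a0 += t1, a1 += t2,
 * which is the combining step of a split-radix inverse FFT.
 */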
/* split-radix ifft butterfly, specialized for wr=1 wi=0 */

#define BUTTERFLY_ZERO(a0,a1,a2,a3) do {	\
    tmp1 = a2.real + a3.real;			\
    tmp2 = a2.imag + a3.imag;			\
    tmp3 = a2.imag - a3.imag;			\
    tmp4 = a3.real - a2.real;			\
    a2.real = a0.real - tmp1;			\
    a2.imag = a0.imag - tmp2;			\
    a3.real = a1.real - tmp3;			\
    a3.imag = a1.imag - tmp4;			\
    a0.real += tmp1;				\
    a0.imag += tmp2;				\
    a1.real += tmp3;				\
    a1.imag += tmp4;				\
} while (0)
/* split-radix ifft butterfly, specialized for wr=wi */

#define BUTTERFLY_HALF(a0,a1,a2,a3,w) do {	\
    tmp5 = (a2.real + a2.imag) * w;		\
    tmp6 = (a2.imag - a2.real) * w;		\
    tmp7 = (a3.real - a3.imag) * w;		\
    tmp8 = (a3.imag + a3.real) * w;		\
    tmp1 = tmp5 + tmp7;				\
    tmp2 = tmp6 + tmp8;				\
    tmp3 = tmp6 - tmp8;				\
    tmp4 = tmp7 - tmp5;				\
    a2.real = a0.real - tmp1;			\
    a2.imag = a0.imag - tmp2;			\
    a3.real = a1.real - tmp3;			\
    a3.imag = a1.imag - tmp4;			\
    a0.real += tmp1;				\
    a0.imag += tmp2;				\
    a1.real += tmp3;				\
    a1.imag += tmp4;				\
} while (0)
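/*
 * BUTTERFLY_ZERO is BUTTERFLY with wr=1, wi=0, and BUTTERFLY_HALF is
 * BUTTERFLY with wr == wi == w (the 45-degree twiddle, w = sqrt(2)/2 as
 * passed in via roots16[1]), so the per-term complex multiplications reduce
 * to nothing, respectively to a single multiply.
 */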
static inline void ifft8 (complex_t * buf)
{
    double tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;

    ifft4 (buf);
    ifft2 (buf + 4);
    ifft2 (buf + 6);

    BUTTERFLY_ZERO (buf[0], buf[2], buf[4], buf[6]);
    BUTTERFLY_HALF (buf[1], buf[3], buf[5], buf[7], roots16[1]);
}
static void ifft_pass (complex_t * buf, sample_t * weight, int n)
{
    complex_t * buf1;
    complex_t * buf2;
    complex_t * buf3;
    double tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
    int i;

    buf++;
    buf1 = buf + n;
    buf2 = buf + 2 * n;
    buf3 = buf + 3 * n;

    BUTTERFLY_ZERO (buf[-1], buf1[-1], buf2[-1], buf3[-1]);

    i = n - 1;

    do {
	BUTTERFLY (buf[0], buf1[0], buf2[0], buf3[0],
		   weight[n], weight[2*i]);
	buf++;
	buf1++;
	buf2++;
	buf3++;
	weight++;
    } while (--i);
}
static void ifft16 (complex_t * buf)
{
    ifft8 (buf);
    ifft4 (buf + 8);
    ifft4 (buf + 12);
    ifft_pass (buf, roots16 - 4, 4);
}

static void ifft32 (complex_t * buf)
{
    ifft16 (buf);
    ifft8 (buf + 16);
    ifft8 (buf + 24);
    ifft_pass (buf, roots32 - 8, 8);
}

static void ifft64_c (complex_t * buf)
{
    ifft32 (buf);
    ifft16 (buf + 32);
    ifft16 (buf + 48);
    ifft_pass (buf, roots64 - 16, 16);
}

static void ifft128_c (complex_t * buf)
{
    ifft32 (buf);
    ifft16 (buf + 32);
    ifft16 (buf + 48);
    ifft_pass (buf, roots64 - 16, 16);

    ifft32 (buf + 64);
    ifft32 (buf + 96);
    ifft_pass (buf, roots128 - 32, 32);
}
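/*
 * Split-radix recursion: each transform of size N runs one transform of size
 * N/2 on the first half of buf and two of size N/4 on the quarters of the
 * second half, then ifft_pass() applies N/4 BUTTERFLY steps to combine them;
 * ifft128_c simply inlines the 64-point stage before the final 32-wide pass.
 */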
void imdct_do_512 (sample_t * data, sample_t * delay, sample_t bias)
{
    int i, k;
    sample_t t_r, t_i, a_r, a_i, b_r, b_i, w_1, w_2;
    const sample_t * window = a52_imdct_window;

    for (i = 0; i < 128; i++) {
	k = fftorder[i];
	t_r = pre1[i].real;
	t_i = pre1[i].imag;

	buf[i].real = t_i * data[255-k] + t_r * data[k];
	buf[i].imag = t_r * data[255-k] - t_i * data[k];
    }

    ifft128 (buf);

    /* Post IFFT complex multiply plus IFFT complex conjugate*/
    /* Window and convert to real valued signal */
    for (i = 0; i < 64; i++) {
	/* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
	t_r = post1[i].real;
	t_i = post1[i].imag;

	a_r = t_r * buf[i].real     + t_i * buf[i].imag;
	a_i = t_i * buf[i].real     - t_r * buf[i].imag;
	b_r = t_i * buf[127-i].real + t_r * buf[127-i].imag;
	b_i = t_r * buf[127-i].real - t_i * buf[127-i].imag;

	w_1 = window[2*i];
	w_2 = window[255-2*i];
	data[2*i]     = delay[2*i] * w_2 - a_r * w_1 + bias;
	data[255-2*i] = delay[2*i] * w_1 + a_r * w_2 + bias;
	delay[2*i] = a_i;

	w_1 = window[2*i+1];
	w_2 = window[254-2*i];
	data[2*i+1]   = delay[2*i+1] * w_2 + b_r * w_1 + bias;
	data[254-2*i] = delay[2*i+1] * w_1 - b_r * w_2 + bias;
	delay[2*i+1] = b_i;
    }
}
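/*
 * imdct_do_512 maps the 512-sample IMDCT onto a 128-point complex inverse
 * FFT: the 256 coefficients are paired into 128 complex values, pre-rotated
 * by pre1[] (with fftorder[] providing the input permutation), transformed
 * by ifft128, post-rotated by post1[], then windowed and overlap-added with
 * the 256-sample delay[] buffer; the samples needed for the next block's
 * overlap are written back into delay[].
 */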
#ifdef HAVE_ALTIVEC_H
#include <altivec.h>
#endif

// used to build registers permutation vectors (vcprm)
// the 's' are for words in the _s_econd vector
#define WORD_0 0x00,0x01,0x02,0x03
#define WORD_1 0x04,0x05,0x06,0x07
#define WORD_2 0x08,0x09,0x0a,0x0b
#define WORD_3 0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

// Each of the three macros below comes in two flavours, selected at build
// time: the Motorola-style (parenthesized) vector literal and the gcc-style
// (braced) one.
#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}

// vcprmle is used to keep the same index as in the SSE version.
// it's the same as vcprm, with the index inversed
// ('le' is Little Endian)
#define vcprmle(a,b,c,d) vcprm(d,c,b,a)

// used to build inverse/identity vectors (vcii)
// n is _n_egative, p is _p_ositive
#define FLOAT_n -1.
#define FLOAT_p 1.

#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}

#define FOUROF(a) (a)
#define FOUROF(a) {a,a,a,a}
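/*
 * Example: vcprm(2,3,0,1) builds the vec_perm control that swaps the two
 * 64-bit halves of a 4-float register, and vcii(p,p,n,n) is the constant
 * {1,1,-1,-1}, so
 *     temp = vec_perm(bufv, bufv, vcprm(2,3,0,1));
 *     bufv = vec_madd(bufv, vcii(p,p,n,n), temp);
 * leaves the complex sum c0+c1 in the low pair and the difference c0-c1 in
 * the high pair of bufv, i.e. one radix-2 butterfly per fused multiply-add.
 */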
void
imdct_do_512_altivec(sample_t data[],sample_t delay[], sample_t bias)
{
  int i;
  int k;
  int p,q;
  int m;
  long two_m;
  long two_m_plus_one;

  sample_t tmp_b_i;
  sample_t tmp_b_r;
  sample_t tmp_a_i;
  sample_t tmp_a_r;

  sample_t *data_ptr;
  sample_t *delay_ptr;
  sample_t *window_ptr;

  /* 512 IMDCT with source and dest data in 'data' */

  /* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/
  for( i=0; i < 128; i++) {
    /* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
    int j= bit_reverse_512[i];
    buf[i].real =         (data[256-2*j-1] * xcos1[j])  -  (data[2*j]       * xsin1[j]);
    buf[i].imag = -1.0 * ((data[2*j]       * xcos1[j])  +  (data[256-2*j-1] * xsin1[j]));
  }

  /* 1. iteration */
  for(i = 0; i < 128; i += 2) {
#if 0
    tmp_a_r = buf[i].real;
    tmp_a_i = buf[i].imag;
    tmp_b_r = buf[i+1].real;
    tmp_b_i = buf[i+1].imag;
    buf[i].real = tmp_a_r + tmp_b_r;
    buf[i].imag =  tmp_a_i + tmp_b_i;
    buf[i+1].real = tmp_a_r - tmp_b_r;
    buf[i+1].imag =  tmp_a_i - tmp_b_i;
#else
    vector float temp, bufv;

    bufv = vec_ld(i << 3, (float*)buf);
    temp = vec_perm(bufv, bufv, vcprm(2,3,0,1));
    bufv = vec_madd(bufv, vcii(p,p,n,n), temp);
    vec_st(bufv, i << 3, (float*)buf);
#endif
  }

  /* 2. iteration */
  // Note w[1]={{1,0}, {0,-1}}
  for(i = 0; i < 128; i += 4) {
#if 0
    tmp_a_r = buf[i].real;
    tmp_a_i = buf[i].imag;
    tmp_b_r = buf[i+2].real;
    tmp_b_i = buf[i+2].imag;
    buf[i].real = tmp_a_r + tmp_b_r;
    buf[i].imag =  tmp_a_i + tmp_b_i;
    buf[i+2].real = tmp_a_r - tmp_b_r;
    buf[i+2].imag =  tmp_a_i - tmp_b_i;
    tmp_a_r = buf[i+1].real;
    tmp_a_i = buf[i+1].imag;
    /* WARNING: im <-> re here ! */
    tmp_b_r = buf[i+3].imag;
    tmp_b_i = buf[i+3].real;
    buf[i+1].real = tmp_a_r + tmp_b_r;
    buf[i+1].imag =  tmp_a_i - tmp_b_i;
    buf[i+3].real = tmp_a_r - tmp_b_r;
    buf[i+3].imag =  tmp_a_i + tmp_b_i;
#else
    vector float buf01, buf23, temp1, temp2;

    buf01 = vec_ld((i + 0) << 3, (float*)buf);
    buf23 = vec_ld((i + 2) << 3, (float*)buf);
    buf23 = vec_perm(buf23,buf23,vcprm(0,1,3,2));

    temp1 = vec_madd(buf23, vcii(p,p,p,n), buf01);
    temp2 = vec_madd(buf23, vcii(n,n,n,p), buf01);

    vec_st(temp1, (i + 0) << 3, (float*)buf);
    vec_st(temp2, (i + 2) << 3, (float*)buf);
#endif
  }

  /* 3. iteration */
  for(i = 0; i < 128; i += 8) {
#if 0
    tmp_a_r = buf[i].real;
    tmp_a_i = buf[i].imag;
    tmp_b_r = buf[i+4].real;
    tmp_b_i = buf[i+4].imag;
    buf[i].real = tmp_a_r + tmp_b_r;
    buf[i].imag =  tmp_a_i + tmp_b_i;
    buf[i+4].real = tmp_a_r - tmp_b_r;
    buf[i+4].imag =  tmp_a_i - tmp_b_i;
    tmp_a_r = buf[1+i].real;
    tmp_a_i = buf[1+i].imag;
    tmp_b_r = (buf[i+5].real + buf[i+5].imag) * w[2][1].real;
    tmp_b_i = (buf[i+5].imag - buf[i+5].real) * w[2][1].real;
    buf[1+i].real = tmp_a_r + tmp_b_r;
    buf[1+i].imag =  tmp_a_i + tmp_b_i;
    buf[i+5].real = tmp_a_r - tmp_b_r;
    buf[i+5].imag =  tmp_a_i - tmp_b_i;
    tmp_a_r = buf[i+2].real;
    tmp_a_i = buf[i+2].imag;
    /* WARNING re <-> im & sign */
    tmp_b_r = buf[i+6].imag;
    tmp_b_i = - buf[i+6].real;
    buf[i+2].real = tmp_a_r + tmp_b_r;
    buf[i+2].imag =  tmp_a_i + tmp_b_i;
    buf[i+6].real = tmp_a_r - tmp_b_r;
    buf[i+6].imag =  tmp_a_i - tmp_b_i;
    tmp_a_r = buf[i+3].real;
    tmp_a_i = buf[i+3].imag;
    tmp_b_r = (buf[i+7].real - buf[i+7].imag) * w[2][3].imag;
    tmp_b_i = (buf[i+7].imag + buf[i+7].real) * w[2][3].imag;
    buf[i+3].real = tmp_a_r + tmp_b_r;
    buf[i+3].imag =  tmp_a_i + tmp_b_i;
    buf[i+7].real = tmp_a_r - tmp_b_r;
    buf[i+7].imag =  tmp_a_i - tmp_b_i;
#else
    vector float buf01, buf23, buf45, buf67;

    buf01 = vec_ld((i + 0) << 3, (float*)buf);
    buf23 = vec_ld((i + 2) << 3, (float*)buf);

    tmp_b_r = (buf[i+5].real + buf[i+5].imag) * w[2][1].real;
    tmp_b_i = (buf[i+5].imag - buf[i+5].real) * w[2][1].real;
    buf[i+5].real = tmp_b_r;
    buf[i+5].imag = tmp_b_i;
    tmp_b_r = (buf[i+7].real - buf[i+7].imag) * w[2][3].imag;
    tmp_b_i = (buf[i+7].imag + buf[i+7].real) * w[2][3].imag;
    buf[i+7].real = tmp_b_r;
    buf[i+7].imag = tmp_b_i;

    buf23 = vec_ld((i + 2) << 3, (float*)buf);
    buf45 = vec_ld((i + 4) << 3, (float*)buf);
    buf67 = vec_ld((i + 6) << 3, (float*)buf);
    buf67 = vec_perm(buf67, buf67, vcprm(1,0,2,3));

    vec_st(vec_add(buf01, buf45), (i + 0) << 3, (float*)buf);
    vec_st(vec_madd(buf67, vcii(p,n,p,p), buf23), (i + 2) << 3, (float*)buf);
    vec_st(vec_sub(buf01, buf45), (i + 4) << 3, (float*)buf);
    vec_st(vec_nmsub(buf67, vcii(p,n,p,p), buf23), (i + 6) << 3, (float*)buf);
#endif
  }

  /* 4-7. iterations */
  for (m=3; m < 7; m++) {
    two_m = (1 << m);
    two_m_plus_one = two_m<<1;

    for(i = 0; i < 128; i += two_m_plus_one) {
      for(k = 0; k < two_m; k+=2) {
        p = k + i;
        q = p + two_m;
#if 0
        tmp_a_r = buf[p].real;
        tmp_a_i = buf[p].imag;
        tmp_b_r =
          buf[q].real * w[m][k].real -
          buf[q].imag * w[m][k].imag;
        tmp_b_i =
          buf[q].imag * w[m][k].real +
          buf[q].real * w[m][k].imag;
        buf[p].real = tmp_a_r + tmp_b_r;
        buf[p].imag =  tmp_a_i + tmp_b_i;
        buf[q].real = tmp_a_r - tmp_b_r;
        buf[q].imag =  tmp_a_i - tmp_b_i;

        tmp_a_r = buf[(p + 1)].real;
        tmp_a_i = buf[(p + 1)].imag;
        tmp_b_r =
          buf[(q + 1)].real * w[m][(k + 1)].real -
          buf[(q + 1)].imag * w[m][(k + 1)].imag;
        tmp_b_i =
          buf[(q + 1)].imag * w[m][(k + 1)].real +
          buf[(q + 1)].real * w[m][(k + 1)].imag;
        buf[(p + 1)].real = tmp_a_r + tmp_b_r;
        buf[(p + 1)].imag =  tmp_a_i + tmp_b_i;
        buf[(q + 1)].real = tmp_a_r - tmp_b_r;
        buf[(q + 1)].imag =  tmp_a_i - tmp_b_i;
#else
        vector float vecp, vecq, vecw, temp1, temp2, temp3, temp4;
        const vector float vczero = (const vector float)FOUROF(0.);
        // first compute buf[q] and buf[q+1]
        vecq = vec_ld(q << 3, (float*)buf);
        vecw = vec_ld(0, (float*)&(w[m][k]));
        temp1 = vec_madd(vecq, vecw, vczero);
        temp2 = vec_perm(vecq, vecq, vcprm(1,0,3,2));
        temp2 = vec_madd(temp2, vecw, vczero);
        temp3 = vec_perm(temp1, temp2, vcprm(0,s0,2,s2));
        temp4 = vec_perm(temp1, temp2, vcprm(1,s1,3,s3));
        vecq = vec_madd(temp4, vcii(n,p,n,p), temp3);
        // then butterfly with buf[p] and buf[p+1]
        vecp = vec_ld(p << 3, (float*)buf);

        temp1 = vec_add(vecp, vecq);
        temp2 = vec_sub(vecp, vecq);

        vec_st(temp1, p << 3, (float*)buf);
        vec_st(temp2, q << 3, (float*)buf);
#endif
      }
    }
  }

  /* Post IFFT complex multiply plus IFFT complex conjugate*/
  for( i=0; i < 128; i+=4) {
    /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
#if 0
    tmp_a_r =        buf[(i + 0)].real;
    tmp_a_i = -1.0 * buf[(i + 0)].imag;
    buf[(i + 0)].real =
      (tmp_a_r * xcos1[(i + 0)])  -  (tmp_a_i * xsin1[(i + 0)]);
    buf[(i + 0)].imag =
      (tmp_a_r * xsin1[(i + 0)])  +  (tmp_a_i * xcos1[(i + 0)]);

    tmp_a_r =        buf[(i + 1)].real;
    tmp_a_i = -1.0 * buf[(i + 1)].imag;
    buf[(i + 1)].real =
      (tmp_a_r * xcos1[(i + 1)])  -  (tmp_a_i * xsin1[(i + 1)]);
    buf[(i + 1)].imag =
      (tmp_a_r * xsin1[(i + 1)])  +  (tmp_a_i * xcos1[(i + 1)]);

    tmp_a_r =        buf[(i + 2)].real;
    tmp_a_i = -1.0 * buf[(i + 2)].imag;
    buf[(i + 2)].real =
      (tmp_a_r * xcos1[(i + 2)])  -  (tmp_a_i * xsin1[(i + 2)]);
    buf[(i + 2)].imag =
      (tmp_a_r * xsin1[(i + 2)])  +  (tmp_a_i * xcos1[(i + 2)]);

    tmp_a_r =        buf[(i + 3)].real;
    tmp_a_i = -1.0 * buf[(i + 3)].imag;
    buf[(i + 3)].real =
      (tmp_a_r * xcos1[(i + 3)])  -  (tmp_a_i * xsin1[(i + 3)]);
    buf[(i + 3)].imag =
      (tmp_a_r * xsin1[(i + 3)])  +  (tmp_a_i * xcos1[(i + 3)]);
#else
    vector float bufv_0, bufv_2, cosv, sinv, temp1, temp2;
    vector float temp0022, temp1133, tempCS01;
    const vector float vczero = (const vector float)FOUROF(0.);

    bufv_0 = vec_ld((i + 0) << 3, (float*)buf);
    bufv_2 = vec_ld((i + 2) << 3, (float*)buf);

    cosv = vec_ld(i << 2, xcos1);
    sinv = vec_ld(i << 2, xsin1);

    temp0022 = vec_perm(bufv_0, bufv_0, vcprm(0,0,2,2));
    temp1133 = vec_perm(bufv_0, bufv_0, vcprm(1,1,3,3));
    tempCS01 = vec_perm(cosv, sinv, vcprm(0,s0,1,s1));
    temp1 = vec_madd(temp0022, tempCS01, vczero);
    tempCS01 = vec_perm(cosv, sinv, vcprm(s0,0,s1,1));
    temp2 = vec_madd(temp1133, tempCS01, vczero);
    bufv_0 = vec_madd(temp2, vcii(p,n,p,n), temp1);

    vec_st(bufv_0, (i + 0) << 3, (float*)buf);

    /* idem with bufv_2 and high-order cosv/sinv */

    temp0022 = vec_perm(bufv_2, bufv_2, vcprm(0,0,2,2));
    temp1133 = vec_perm(bufv_2, bufv_2, vcprm(1,1,3,3));
    tempCS01 = vec_perm(cosv, sinv, vcprm(2,s2,3,s3));
    temp1 = vec_madd(temp0022, tempCS01, vczero);
    tempCS01 = vec_perm(cosv, sinv, vcprm(s2,2,s3,3));
    temp2 = vec_madd(temp1133, tempCS01, vczero);
    bufv_2 = vec_madd(temp2, vcii(p,n,p,n), temp1);

    vec_st(bufv_2, (i + 2) << 3, (float*)buf);
#endif
  }

  data_ptr = data;
  delay_ptr = delay;
  window_ptr = a52_imdct_window;

  /* Window and convert to real valued signal */
  for(i=0; i< 64; i++) {
    *data_ptr++  = -buf[64+i].imag   * *window_ptr++ + *delay_ptr++ + bias;
    *data_ptr++  =  buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
  }

  for(i=0; i< 64; i++) {
    *data_ptr++  = -buf[i].real       * *window_ptr++ + *delay_ptr++ + bias;
    *data_ptr++  =  buf[128-i-1].imag * *window_ptr++ + *delay_ptr++ + bias;
  }

  /* The trailing edge of the window goes into the delay line */
  delay_ptr = delay;

  for(i=0; i< 64; i++) {
    *delay_ptr++  = -buf[64+i].real   * *--window_ptr;
    *delay_ptr++  =  buf[64-i-1].imag * *--window_ptr;
  }

  for(i=0; i<64; i++) {
    *delay_ptr++  =  buf[i].imag       * *--window_ptr;
    *delay_ptr++  = -buf[128-i-1].real * *--window_ptr;
  }
}
// Stuff below this line is borrowed from libac3

#if defined(ARCH_X86) || defined(ARCH_X86_64)

#include "srfftp_3dnow.h"

const i_cmplx_t x_plus_minus_3dnow __attribute__ ((aligned (8))) = {{ 0x00000000UL, 0x80000000UL }};
const i_cmplx_t x_minus_plus_3dnow __attribute__ ((aligned (8))) = {{ 0x80000000UL, 0x00000000UL }};
const complex_t HSQRT2_3DNOW __attribute__ ((aligned (8))) = { 0.707106781188, 0.707106781188 };

#include "imdct_3dnow.h"

#include "imdct_3dnow.h"
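/* imdct_3dnow.h is pulled in twice so that it expands into both the plain
   3DNow! and the extended 3DNow! flavours of the transform; the resulting
   imdct_do_512_3dnow and imdct_do_512_3dnowex entry points are selected at
   runtime in a52_imdct_init() below. */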
void
imdct_do_512_sse(sample_t data[],sample_t delay[], sample_t bias)
{
    long m;
    long two_m;
    long two_m_plus_one;
    long two_m_plus_one_shl3;
    complex_t *buf_offset;

    sample_t *data_ptr;
    sample_t *delay_ptr;
    sample_t *window_ptr;

    /* 512 IMDCT with source and dest data in 'data' */
    /* see the C version (dct_do_512()), it's almost identical, just in C */

    /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
    /* Bit reversed shuffling */
    __asm__ volatile(
	"xor %%"REG_S", %%"REG_S"		\n\t"
	"lea "MANGLE(bit_reverse_512)", %%"REG_a"\n\t"
	"mov $1008, %%"REG_D"			\n\t"
	"push %%"REG_BP"			\n\t" //use ebp without telling gcc
	"1:					\n\t"
	"movlps (%0, %%"REG_S"), %%xmm0		\n\t" // XXXI
	"movhps 8(%0, %%"REG_D"), %%xmm0	\n\t" // RXXI
	"movlps 8(%0, %%"REG_S"), %%xmm1	\n\t" // XXXi
	"movhps (%0, %%"REG_D"), %%xmm1		\n\t" // rXXi
	"shufps $0x33, %%xmm1, %%xmm0		\n\t" // irIR
	"movaps "MANGLE(sseSinCos1c)"(%%"REG_S"), %%xmm2\n\t"
	"mulps %%xmm0, %%xmm2			\n\t"
	"shufps $0xB1, %%xmm0, %%xmm0		\n\t" // riRI
	"mulps "MANGLE(sseSinCos1d)"(%%"REG_S"), %%xmm0\n\t"
	"subps %%xmm0, %%xmm2			\n\t"
	"movzb (%%"REG_a"), %%"REG_d"		\n\t"
	"movzb 1(%%"REG_a"), %%"REG_BP"		\n\t"
	"movlps %%xmm2, (%1, %%"REG_d", 8)	\n\t"
	"movhps %%xmm2, (%1, %%"REG_BP", 8)	\n\t"
	"add $16, %%"REG_S"			\n\t"
	"add $2, %%"REG_a"			\n\t" // avoid complex addressing for P4 crap
	"sub $16, %%"REG_D"			\n\t"
	"jnc 1b					\n\t"
	"pop %%"REG_BP"				\n\t" //no we didnt touch ebp *g*
	:: "b" (data), "c" (buf)
	: "%"REG_S, "%"REG_D, "%"REG_a, "%"REG_d
    );

    /* unoptimized variant
    for (m=1; m < 7; m++) {
	if(m)
	    two_m = (1 << m);
	else
	    two_m = 1;

	two_m_plus_one = (1 << (m+1));

	for(i = 0; i < 128; i += two_m_plus_one) {
	    for(k = 0; k < two_m; k++) {
		p = k + i;
		q = p + two_m;
		tmp_a_r = buf[p].real;
		tmp_a_i = buf[p].imag;
		tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
		tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
		buf[p].real = tmp_a_r + tmp_b_r;
		buf[p].imag = tmp_a_i + tmp_b_i;
		buf[q].real = tmp_a_r - tmp_b_r;
		buf[q].imag = tmp_a_i - tmp_b_i;
	    }
	}
    }
    */

    // Note w[0][0]={1,0}
    __asm__ volatile(
	"xorps %%xmm1, %%xmm1		\n\t"
	"xorps %%xmm2, %%xmm2		\n\t"
	"mov %0, %%"REG_S"		\n\t"
	"1:				\n\t"
	"movlps (%%"REG_S"), %%xmm0	\n\t" //buf[p]
	"movlps 8(%%"REG_S"), %%xmm1	\n\t" //buf[q]
	"movhps (%%"REG_S"), %%xmm0	\n\t" //buf[p]
	"movhps 8(%%"REG_S"), %%xmm2	\n\t" //buf[q]
	"addps %%xmm1, %%xmm0		\n\t"
	"subps %%xmm2, %%xmm0		\n\t"
	"movaps %%xmm0, (%%"REG_S")	\n\t"
	"add $16, %%"REG_S"		\n\t"
	"cmp %1, %%"REG_S"		\n\t"
	"jb 1b				\n\t"
	:: "g" (buf), "r" (buf + 128)
	: "%"REG_S
    );

    // Note w[1]={{1,0}, {0,-1}}
    __asm__ volatile(
	"movaps "MANGLE(ps111_1)", %%xmm7	\n\t" // 1,1,1,-1
	"mov %0, %%"REG_S"			\n\t"
	"1:					\n\t"
	"movaps 16(%%"REG_S"), %%xmm2		\n\t" //r2,i2,r3,i3
	"shufps $0xB4, %%xmm2, %%xmm2		\n\t" //r2,i2,i3,r3
	"mulps %%xmm7, %%xmm2			\n\t" //r2,i2,i3,-r3
	"movaps (%%"REG_S"), %%xmm0		\n\t" //r0,i0,r1,i1
	"movaps (%%"REG_S"), %%xmm1		\n\t" //r0,i0,r1,i1
	"addps %%xmm2, %%xmm0			\n\t"
	"subps %%xmm2, %%xmm1			\n\t"
	"movaps %%xmm0, (%%"REG_S")		\n\t"
	"movaps %%xmm1, 16(%%"REG_S")		\n\t"
	"add $32, %%"REG_S"			\n\t"
	"cmp %1, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "g" (buf), "r" (buf + 128)
	: "%"REG_S
    );

    /*
    Note sseW2+0={1,1,sqrt(2),sqrt(2))
    Note sseW2+16={0,0,sqrt(2),-sqrt(2))
    Note sseW2+32={0,0,-sqrt(2),-sqrt(2))
    Note sseW2+48={1,-1,sqrt(2),-sqrt(2))
    */
    __asm__ volatile(
	"movaps 48+"MANGLE(sseW2)", %%xmm6	\n\t"
	"movaps 16+"MANGLE(sseW2)", %%xmm7	\n\t"
	"xorps %%xmm5, %%xmm5			\n\t"
	"xorps %%xmm2, %%xmm2			\n\t"
	"mov %0, %%"REG_S"			\n\t"
	"1:					\n\t"
	"movaps 32(%%"REG_S"), %%xmm2		\n\t" //r4,i4,r5,i5
	"movaps 48(%%"REG_S"), %%xmm3		\n\t" //r6,i6,r7,i7
	"movaps "MANGLE(sseW2)", %%xmm4		\n\t" //r4,i4,r5,i5
	"movaps 32+"MANGLE(sseW2)", %%xmm5	\n\t" //r6,i6,r7,i7
	"mulps %%xmm2, %%xmm4			\n\t"
	"mulps %%xmm3, %%xmm5			\n\t"
	"shufps $0xB1, %%xmm2, %%xmm2		\n\t" //i4,r4,i5,r5
	"shufps $0xB1, %%xmm3, %%xmm3		\n\t" //i6,r6,i7,r7
	"mulps %%xmm6, %%xmm3			\n\t"
	"mulps %%xmm7, %%xmm2			\n\t"
	"movaps (%%"REG_S"), %%xmm0		\n\t" //r0,i0,r1,i1
	"movaps 16(%%"REG_S"), %%xmm1		\n\t" //r2,i2,r3,i3
	"addps %%xmm4, %%xmm2			\n\t"
	"addps %%xmm5, %%xmm3			\n\t"
	"movaps %%xmm2, %%xmm4			\n\t"
	"movaps %%xmm3, %%xmm5			\n\t"
	"addps %%xmm0, %%xmm2			\n\t"
	"addps %%xmm1, %%xmm3			\n\t"
	"subps %%xmm4, %%xmm0			\n\t"
	"subps %%xmm5, %%xmm1			\n\t"
	"movaps %%xmm2, (%%"REG_S")		\n\t"
	"movaps %%xmm3, 16(%%"REG_S")		\n\t"
	"movaps %%xmm0, 32(%%"REG_S")		\n\t"
	"movaps %%xmm1, 48(%%"REG_S")		\n\t"
	"add $64, %%"REG_S"			\n\t"
	"cmp %1, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "g" (buf), "r" (buf + 128)
	: "%"REG_S
    );

    /* 4-7. iterations */
    for (m=3; m < 7; m++) {
	two_m = (1 << m);
	two_m_plus_one = two_m<<1;
	two_m_plus_one_shl3 = (two_m_plus_one<<3);
	buf_offset = buf+128;
	__asm__ volatile(
	    "mov %0, %%"REG_S"			\n\t"
	    "1:					\n\t"
	    "xor %%"REG_D", %%"REG_D"		\n\t" // k
	    "lea (%%"REG_S", %3), %%"REG_d"	\n\t"
	    "2:					\n\t"
	    "movaps (%%"REG_d", %%"REG_D"), %%xmm1	\n\t"
	    "movaps (%4, %%"REG_D", 2), %%xmm2	\n\t"
	    "mulps %%xmm1, %%xmm2		\n\t"
	    "shufps $0xB1, %%xmm1, %%xmm1	\n\t"
	    "mulps 16(%4, %%"REG_D", 2), %%xmm1	\n\t"
	    "movaps (%%"REG_S", %%"REG_D"), %%xmm0	\n\t"
	    "addps %%xmm2, %%xmm1		\n\t"
	    "movaps %%xmm1, %%xmm2		\n\t"
	    "addps %%xmm0, %%xmm1		\n\t"
	    "subps %%xmm2, %%xmm0		\n\t"
	    "movaps %%xmm1, (%%"REG_S", %%"REG_D")	\n\t"
	    "movaps %%xmm0, (%%"REG_d", %%"REG_D")	\n\t"
	    "add $16, %%"REG_D"			\n\t"
	    "cmp %3, %%"REG_D"			\n\t" //FIXME (opt) count against 0
	    "jb 2b				\n\t"
	    "add %2, %%"REG_S"			\n\t"
	    "cmp %1, %%"REG_S"			\n\t"
	    "jb 1b				\n\t"
	    :: "g" (buf), "m" (buf_offset), "m" (two_m_plus_one_shl3), "r" (two_m<<3),
	       "r" (sseW[m])
	    : "%"REG_S, "%"REG_D, "%"REG_d
	);
    }

    /* Post IFFT complex multiply plus IFFT complex conjugate*/
    __asm__ volatile(
	"mov $-1024, %%"REG_S"			\n\t"
	"1:					\n\t"
	"movaps (%0, %%"REG_S"), %%xmm0		\n\t"
	"movaps (%0, %%"REG_S"), %%xmm1		\n\t"
	"shufps $0xB1, %%xmm0, %%xmm0		\n\t"
	"mulps 1024+"MANGLE(sseSinCos1c)"(%%"REG_S"), %%xmm1\n\t"
	"mulps 1024+"MANGLE(sseSinCos1d)"(%%"REG_S"), %%xmm0\n\t"
	"addps %%xmm1, %%xmm0			\n\t"
	"movaps %%xmm0, (%0, %%"REG_S")		\n\t"
	"add $16, %%"REG_S"			\n\t"
	"jnz 1b					\n\t"
	:: "r" (buf+128)
	: "%"REG_S
    );

    data_ptr = data;
    delay_ptr = delay;
    window_ptr = a52_imdct_window;

    /* Window and convert to real valued signal */
    __asm__ volatile(
	"xor %%"REG_D", %%"REG_D"		\n\t" // 0
	"xor %%"REG_S", %%"REG_S"		\n\t" // 0
	"movss %3, %%xmm2			\n\t" // bias
	"shufps $0x00, %%xmm2, %%xmm2		\n\t" // bias, bias, ...
	"1:					\n\t"
	"movlps (%0, %%"REG_S"), %%xmm0		\n\t" // ? ? A ?
	"movlps 8(%0, %%"REG_S"), %%xmm1	\n\t" // ? ? C ?
	"movhps -16(%0, %%"REG_D"), %%xmm1	\n\t" // ? D C ?
	"movhps -8(%0, %%"REG_D"), %%xmm0	\n\t" // ? B A ?
	"shufps $0x99, %%xmm1, %%xmm0		\n\t" // D C B A
	"mulps "MANGLE(sseWindow)"(%%"REG_S"), %%xmm0\n\t"
	"addps (%2, %%"REG_S"), %%xmm0		\n\t"
	"addps %%xmm2, %%xmm0			\n\t"
	"movaps %%xmm0, (%1, %%"REG_S")		\n\t"
	"add $16, %%"REG_S"			\n\t"
	"sub $16, %%"REG_D"			\n\t"
	"cmp $512, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "r" (buf+64), "r" (data_ptr), "r" (delay_ptr), "m" (bias)
	: "%"REG_S, "%"REG_D
    );
    data_ptr += 128;
    delay_ptr += 128;

    __asm__ volatile(
	"mov $1024, %%"REG_D"			\n\t" // 512
	"xor %%"REG_S", %%"REG_S"		\n\t" // 0
	"movss %3, %%xmm2			\n\t" // bias
	"shufps $0x00, %%xmm2, %%xmm2		\n\t" // bias, bias, ...
	"1:					\n\t"
	"movlps (%0, %%"REG_S"), %%xmm0		\n\t" // ? ? ? A
	"movlps 8(%0, %%"REG_S"), %%xmm1	\n\t" // ? ? ? C
	"movhps -16(%0, %%"REG_D"), %%xmm1	\n\t" // D ? ? C
	"movhps -8(%0, %%"REG_D"), %%xmm0	\n\t" // B ? ? A
	"shufps $0xCC, %%xmm1, %%xmm0		\n\t" // D C B A
	"mulps 512+"MANGLE(sseWindow)"(%%"REG_S"), %%xmm0\n\t"
	"addps (%2, %%"REG_S"), %%xmm0		\n\t"
	"addps %%xmm2, %%xmm0			\n\t"
	"movaps %%xmm0, (%1, %%"REG_S")		\n\t"
	"add $16, %%"REG_S"			\n\t"
	"sub $16, %%"REG_D"			\n\t"
	"cmp $512, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "r" (buf), "r" (data_ptr), "r" (delay_ptr), "m" (bias)
	: "%"REG_S, "%"REG_D
    );

    /* The trailing edge of the window goes into the delay line */
    delay_ptr = delay;

    __asm__ volatile(
	"xor %%"REG_D", %%"REG_D"		\n\t" // 0
	"xor %%"REG_S", %%"REG_S"		\n\t" // 0
	"1:					\n\t"
	"movlps (%0, %%"REG_S"), %%xmm0		\n\t" // ? ? ? A
	"movlps 8(%0, %%"REG_S"), %%xmm1	\n\t" // ? ? ? C
	"movhps -16(%0, %%"REG_D"), %%xmm1	\n\t" // D ? ? C
	"movhps -8(%0, %%"REG_D"), %%xmm0	\n\t" // B ? ? A
	"shufps $0xCC, %%xmm1, %%xmm0		\n\t" // D C B A
	"mulps 1024+"MANGLE(sseWindow)"(%%"REG_S"), %%xmm0\n\t"
	"movaps %%xmm0, (%1, %%"REG_S")		\n\t"
	"add $16, %%"REG_S"			\n\t"
	"sub $16, %%"REG_D"			\n\t"
	"cmp $512, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "r" (buf+64), "r" (delay_ptr)
	: "%"REG_S, "%"REG_D
    );
    delay_ptr += 128;

    __asm__ volatile(
	"mov $1024, %%"REG_D"			\n\t" // 1024
	"xor %%"REG_S", %%"REG_S"		\n\t" // 0
	"1:					\n\t"
	"movlps (%0, %%"REG_S"), %%xmm0		\n\t" // ? ? A ?
	"movlps 8(%0, %%"REG_S"), %%xmm1	\n\t" // ? ? C ?
	"movhps -16(%0, %%"REG_D"), %%xmm1	\n\t" // ? D C ?
	"movhps -8(%0, %%"REG_D"), %%xmm0	\n\t" // ? B A ?
	"shufps $0x99, %%xmm1, %%xmm0		\n\t" // D C B A
	"mulps 1536+"MANGLE(sseWindow)"(%%"REG_S"), %%xmm0\n\t"
	"movaps %%xmm0, (%1, %%"REG_S")		\n\t"
	"add $16, %%"REG_S"			\n\t"
	"sub $16, %%"REG_D"			\n\t"
	"cmp $512, %%"REG_S"			\n\t"
	"jb 1b					\n\t"
	:: "r" (buf), "r" (delay_ptr)
	: "%"REG_S, "%"REG_D
    );
}
#endif // ARCH_X86 || ARCH_X86_64
void a52_imdct_256(sample_t * data, sample_t * delay, sample_t bias)
{
    int i, k;
    sample_t t_r, t_i, a_r, a_i, b_r, b_i, c_r, c_i, d_r, d_i, w_1, w_2;
    const sample_t * window = a52_imdct_window;
    complex_t buf1[64], buf2[64];

    /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
    for (i = 0; i < 64; i++) {
	k = fftorder[i];
	t_r = pre2[i].real;
	t_i = pre2[i].imag;

	buf1[i].real = t_i * data[254-k] + t_r * data[k];
	buf1[i].imag = t_r * data[254-k] - t_i * data[k];

	buf2[i].real = t_i * data[255-k] + t_r * data[k+1];
	buf2[i].imag = t_r * data[255-k] - t_i * data[k+1];
    }

    ifft64 (buf1);
    ifft64 (buf2);

    /* Post IFFT complex multiply */
    /* Window and convert to real valued signal */
    for (i = 0; i < 32; i++) {
	/* y1[n] = z1[n] * (xcos2[n] + j * xsin2[n]) ; */
	t_r = post2[i].real;
	t_i = post2[i].imag;

	a_r = t_r * buf1[i].real    + t_i * buf1[i].imag;
	a_i = t_i * buf1[i].real    - t_r * buf1[i].imag;
	b_r = t_i * buf1[63-i].real + t_r * buf1[63-i].imag;
	b_i = t_r * buf1[63-i].real - t_i * buf1[63-i].imag;

	c_r = t_r * buf2[i].real    + t_i * buf2[i].imag;
	c_i = t_i * buf2[i].real    - t_r * buf2[i].imag;
	d_r = t_i * buf2[63-i].real + t_r * buf2[63-i].imag;
	d_i = t_r * buf2[63-i].real - t_i * buf2[63-i].imag;

	w_1 = window[2*i];
	w_2 = window[255-2*i];
	data[2*i]     = delay[2*i] * w_2 - a_r * w_1 + bias;
	data[255-2*i] = delay[2*i] * w_1 + a_r * w_2 + bias;

	w_1 = window[128+2*i];
	w_2 = window[127-2*i];
	data[128+2*i] = delay[127-2*i] * w_2 + a_i * w_1 + bias;
	data[127-2*i] = delay[127-2*i] * w_1 - a_i * w_2 + bias;
	delay[127-2*i] = c_r;

	w_1 = window[2*i+1];
	w_2 = window[254-2*i];
	data[2*i+1]   = delay[2*i+1] * w_2 - b_i * w_1 + bias;
	data[254-2*i] = delay[2*i+1] * w_1 + b_i * w_2 + bias;

	w_1 = window[129+2*i];
	w_2 = window[126-2*i];
	data[129+2*i] = delay[126-2*i] * w_2 + b_r * w_1 + bias;
	data[126-2*i] = delay[126-2*i] * w_1 - b_r * w_2 + bias;
	delay[126-2*i] = d_i;
    }
}
static double besselI0 (double x)
{
    double bessel = 1;
    int i = 100;

    do
	bessel = bessel * x / (i * i) + 1;
    while (--i);

    return bessel;
}
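/*
 * The Horner-style recurrence above evaluates the truncated series
 *     sum_{k>=0} x^k / (k!)^2
 * which equals I0(2*sqrt(x)), the zeroth-order modified Bessel function of
 * the first kind at 2*sqrt(x).
 */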
void a52_imdct_init (uint32_t mm_accel)
{
    int i, j, k;
    double sum;

    /* compute imdct window - kaiser-bessel derived window, alpha = 5.0 */
    sum = 0;
    for (i = 0; i < 256; i++) {
	sum += besselI0 (i * (256 - i) * (5 * M_PI / 256) * (5 * M_PI / 256));
	a52_imdct_window[i] = sum;
    }
    sum++;
    for (i = 0; i < 256; i++)
	a52_imdct_window[i] = sqrt (a52_imdct_window[i] / sum);
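    /*
     * Since i*(256-i)*(5*M_PI/256)^2 == (5*M_PI/2)^2 * (1 - ((i-128)/128)^2),
     * each besselI0() call above returns I0(5*pi*sqrt(1 - ((i-128)/128)^2)),
     * i.e. a Kaiser window sample with alpha = 5; the running sum followed by
     * normalization and a square root yields the Kaiser-Bessel derived (KBD)
     * window of length 256 used for the overlap-add.
     */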
    for (i = 0; i < 3; i++)
	roots16[i] = cos ((M_PI / 8) * (i + 1));

    for (i = 0; i < 7; i++)
	roots32[i] = cos ((M_PI / 16) * (i + 1));

    for (i = 0; i < 15; i++)
	roots64[i] = cos ((M_PI / 32) * (i + 1));

    for (i = 0; i < 31; i++)
	roots128[i] = cos ((M_PI / 64) * (i + 1));

    for (i = 0; i < 64; i++) {
	k = fftorder[i] / 2 + 64;
	pre1[i].real = cos ((M_PI / 256) * (k - 0.25));
	pre1[i].imag = sin ((M_PI / 256) * (k - 0.25));
    }

    for (i = 64; i < 128; i++) {
	k = fftorder[i] / 2 + 64;
	pre1[i].real = -cos ((M_PI / 256) * (k - 0.25));
	pre1[i].imag = -sin ((M_PI / 256) * (k - 0.25));
    }

    for (i = 0; i < 64; i++) {
	post1[i].real = cos ((M_PI / 256) * (i + 0.5));
	post1[i].imag = sin ((M_PI / 256) * (i + 0.5));
    }

    for (i = 0; i < 64; i++) {
	k = fftorder[i] / 4;
	pre2[i].real = cos ((M_PI / 128) * (k - 0.25));
	pre2[i].imag = sin ((M_PI / 128) * (k - 0.25));
    }

    for (i = 0; i < 32; i++) {
	post2[i].real = cos ((M_PI / 128) * (i + 0.5));
	post2[i].imag = sin ((M_PI / 128) * (i + 0.5));
    }

    for (i = 0; i < 128; i++) {
	xcos1[i] = -cos ((M_PI / 2048) * (8 * i + 1));
	xsin1[i] = -sin ((M_PI / 2048) * (8 * i + 1));
    }
    for (i = 0; i < 7; i++) {
	j = 1 << i;
	for (k = 0; k < j; k++) {
	    w[i][k].real = cos (-M_PI * k / j);
	    w[i][k].imag = sin (-M_PI * k / j);
	}
    }
#if defined(ARCH_X86) || defined(ARCH_X86_64)
    for (i = 0; i < 128; i++) {
	sseSinCos1c[2*i+0]=  xcos1[i];
	sseSinCos1c[2*i+1]= -xcos1[i];
	sseSinCos1d[2*i+0]=  xsin1[i];
	sseSinCos1d[2*i+1]=  xsin1[i];
    }
    for (i = 1; i < 7; i++) {
	j = 1 << i;
	for (k = 0; k < j; k+=2) {
	    sseW[i][4*k + 0] =  w[i][k+0].real;
	    sseW[i][4*k + 1] =  w[i][k+0].real;
	    sseW[i][4*k + 2] =  w[i][k+1].real;
	    sseW[i][4*k + 3] =  w[i][k+1].real;

	    sseW[i][4*k + 4] = -w[i][k+0].imag;
	    sseW[i][4*k + 5] =  w[i][k+0].imag;
	    sseW[i][4*k + 6] = -w[i][k+1].imag;
	    sseW[i][4*k + 7] =  w[i][k+1].imag;

	    //we multiply more or less uninitialized numbers so we need to use exactly 0.0
	    if (k == 0) {
//		sseW[i][4*k + 0]= sseW[i][4*k + 1]= 1.0;
		sseW[i][4*k + 4]= sseW[i][4*k + 5]= 0.0;
	    }

	    if (2*k == j) {
		sseW[i][4*k + 0]= sseW[i][4*k + 1]= 0.0;
//		sseW[i][4*k + 4]= -(sseW[i][4*k + 5]= -1.0);
	    }
	}
    }
    for(i=0; i<128; i++) {
	sseWindow[2*i+0]= -a52_imdct_window[2*i+0];
	sseWindow[2*i+1]=  a52_imdct_window[2*i+1];
    }

    for(i=0; i<64; i++) {
	sseWindow[256 + 2*i+0]= -a52_imdct_window[254 - 2*i+1];
	sseWindow[256 + 2*i+1]=  a52_imdct_window[254 - 2*i+0];
	sseWindow[384 + 2*i+0]=  a52_imdct_window[126 - 2*i+1];
	sseWindow[384 + 2*i+1]= -a52_imdct_window[126 - 2*i+0];
    }
#endif
    a52_imdct_512 = imdct_do_512;
    ifft128 = ifft128_c;
    ifft64 = ifft64_c;

#if defined(ARCH_X86) || defined(ARCH_X86_64)
    if (mm_accel & MM_ACCEL_X86_SSE) {
	fprintf (stderr, "Using SSE optimized IMDCT transform\n");
	a52_imdct_512 = imdct_do_512_sse;
    }
    else if (mm_accel & MM_ACCEL_X86_3DNOWEXT) {
	fprintf (stderr, "Using 3DNowEx optimized IMDCT transform\n");
	a52_imdct_512 = imdct_do_512_3dnowex;
    }
    else if (mm_accel & MM_ACCEL_X86_3DNOW) {
	fprintf (stderr, "Using 3DNow optimized IMDCT transform\n");
	a52_imdct_512 = imdct_do_512_3dnow;
    }
    else
#endif // ARCH_X86 || ARCH_X86_64
    if (mm_accel & MM_ACCEL_PPC_ALTIVEC) {
	fprintf (stderr, "Using AltiVec optimized IMDCT transform\n");
	a52_imdct_512 = imdct_do_512_altivec;
    }
    else
#ifdef LIBA52_DJBFFT
    if (mm_accel & MM_ACCEL_DJBFFT) {
	fprintf (stderr, "Using djbfft for IMDCT transform\n");
	ifft128 = (void (*) (complex_t *)) fftc4_un128;
	ifft64 = (void (*) (complex_t *)) fftc4_un64;
    }
    else
#endif
    {
	fprintf (stderr, "No accelerated IMDCT transform found\n");
    }
}