1 /* Copyright (C) 2002-2006 Jean-Marc Valin
3 Various analysis/synthesis filters
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
9 - Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 - Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 - Neither the name of the Xiph.org Foundation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
24 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "config-speex.h"
38 #include "stack_alloc.h"
40 #include "math_approx.h"
45 #include "filters_sse.h"
46 #elif defined (ARM4_ASM) || defined(ARM5E_ASM)
47 #include "filters_arm4.h"
48 #define OVERRIDE_IIR_MEM16
49 #define OVERRIDE_QMF_SYNTH
50 #define OVERRIDE_SIGNAL_MUL
51 #elif defined (COLDFIRE_ASM)
52 #define OVERRIDE_IIR_MEM16
53 #define OVERRIDE_QMF_SYNTH
54 #define OVERRIDE_SIGNAL_MUL
55 #elif defined (BFIN_ASM)
56 #include "filters_bfin.h"
61 void bw_lpc(spx_word16_t gamma
, const spx_coef_t
*lpc_in
, spx_coef_t
*lpc_out
, int order
)
64 spx_word16_t tmp
=gamma
;
67 lpc_out
[i
] = MULT16_16_P15(tmp
,lpc_in
[i
]);
68 tmp
= MULT16_16_P15(tmp
, gamma
);
72 void sanitize_values32(spx_word32_t
*vec
, spx_word32_t min_val
, spx_word32_t max_val
, int len
)
77 /* It's important we do the test that way so we can catch NaNs, which are neither greater nor smaller */
78 if (!(vec
[i
]>=min_val
&& vec
[i
] <= max_val
))
82 else if (vec
[i
] > max_val
)
84 else /* Has to be NaN */
90 void highpass(const spx_word16_t
*x
, spx_word16_t
*y
, int len
, int filtID
, spx_mem_t
*mem
)
94 static const spx_word16_t Pcoef
[5][3] ICONST_ATTR
= {{16384, -31313, 14991}, {16384, -31569, 15249}, {16384, -31677, 15328}, {16384, -32313, 15947}, {16384, -22446, 6537}};
95 static const spx_word16_t Zcoef
[5][3] ICONST_ATTR
= {{15672, -31344, 15672}, {15802, -31601, 15802}, {15847, -31694, 15847}, {16162, -32322, 16162}, {14418, -28836, 14418}};
97 const spx_word16_t Pcoef
[5][3] = {{1.00000f
, -1.91120f
, 0.91498f
}, {1.00000f
, -1.92683f
, 0.93071f
}, {1.00000f
, -1.93338f
, 0.93553f
}, {1.00000f
, -1.97226f
, 0.97332f
}, {1.00000f
, -1.37000f
, 0.39900f
}};
98 const spx_word16_t Zcoef
[5][3] = {{0.95654f
, -1.91309f
, 0.95654f
}, {0.96446f
, -1.92879f
, 0.96446f
}, {0.96723f
, -1.93445f
, 0.96723f
}, {0.98645f
, -1.97277f
, 0.98645f
}, {0.88000f
, -1.76000f
, 0.88000f
}};
100 const spx_word16_t
*den
, *num
;
104 den
= Pcoef
[filtID
]; num
= Zcoef
[filtID
];
109 spx_word32_t vout
= ADD32(MULT16_16(num
[0], x
[i
]),mem
[0]);
110 yi
= EXTRACT16(SATURATE(PSHR32(vout
,14),32767));
111 mem
[0] = ADD32(MAC16_16(mem
[1], num
[1],x
[i
]), SHL32(MULT16_32_Q15(-den
[1],vout
),1));
112 mem
[1] = ADD32(MULT16_16(num
[2],x
[i
]), SHL32(MULT16_32_Q15(-den
[2],vout
),1));
119 #ifndef OVERRIDE_SIGNAL_MUL
120 /* FIXME: These functions are ugly and probably introduce too much error */
121 void signal_mul(const spx_sig_t
*x
, spx_sig_t
*y
, spx_word32_t scale
, int len
)
126 y
[i
] = SHL32(MULT16_32_Q14(EXTRACT16(SHR32(x
[i
],7)),scale
),7);
131 #ifndef SPEEX_DISABLE_ENCODER
132 void signal_div(const spx_word16_t
*x
, spx_word16_t
*y
, spx_word32_t scale
, int len
)
135 if (scale
> SHL32(EXTEND32(SIG_SCALING
), 8))
137 spx_word16_t scale_1
;
138 scale
= PSHR32(scale
, SIG_SHIFT
);
139 scale_1
= EXTRACT16(PDIV32_16(SHL32(EXTEND32(SIG_SCALING
),7),scale
));
142 y
[i
] = MULT16_16_P15(scale_1
, x
[i
]);
144 } else if (scale
> SHR32(EXTEND32(SIG_SCALING
), 2)) {
145 spx_word16_t scale_1
;
146 scale
= PSHR32(scale
, SIG_SHIFT
-5);
147 scale_1
= DIV32_16(SHL32(EXTEND32(SIG_SCALING
),3),scale
);
150 y
[i
] = PSHR32(MULT16_16(scale_1
, SHL16(x
[i
],2)),8);
153 spx_word16_t scale_1
;
154 scale
= PSHR32(scale
, SIG_SHIFT
-7);
157 scale_1
= DIV32_16(SHL32(EXTEND32(SIG_SCALING
),3),scale
);
160 y
[i
] = PSHR32(MULT16_16(scale_1
, SHL16(x
[i
],2)),6);
164 #endif /* SPEEX_DISABLE_ENCODER */
168 void signal_mul(const spx_sig_t
*x
, spx_sig_t
*y
, spx_word32_t scale
, int len
)
175 void signal_div(const spx_sig_t
*x
, spx_sig_t
*y
, spx_word32_t scale
, int len
)
178 float scale_1
= 1/scale
;
190 #ifndef SPEEX_DISABLE_ENCODER
191 spx_word16_t
compute_rms(const spx_sig_t
*x
, int len
)
200 spx_sig_t tmp
= x
[i
];
208 while (max_val
>16383)
218 tmp
= EXTRACT16(SHR32(x
[i
],sig_shift
));
219 sum2
= MAC16_16(sum2
,tmp
,tmp
);
220 tmp
= EXTRACT16(SHR32(x
[i
+1],sig_shift
));
221 sum2
= MAC16_16(sum2
,tmp
,tmp
);
222 tmp
= EXTRACT16(SHR32(x
[i
+2],sig_shift
));
223 sum2
= MAC16_16(sum2
,tmp
,tmp
);
224 tmp
= EXTRACT16(SHR32(x
[i
+3],sig_shift
));
225 sum2
= MAC16_16(sum2
,tmp
,tmp
);
226 sum
= ADD32(sum
,SHR32(sum2
,6));
229 return EXTRACT16(PSHR32(SHL32(EXTEND32(spx_sqrt(DIV32(sum
,len
))),(sig_shift
+3)),SIG_SHIFT
));
231 #endif /*SPEEX_DISABLE_ENCODER */
233 spx_word16_t
compute_rms16(const spx_word16_t
*x
, int len
)
236 spx_word16_t max_val
=10;
240 spx_sig_t tmp
= x
[i
];
252 sum2
= MAC16_16(sum2
,SHR16(x
[i
],1),SHR16(x
[i
],1));
253 sum2
= MAC16_16(sum2
,SHR16(x
[i
+1],1),SHR16(x
[i
+1],1));
254 sum2
= MAC16_16(sum2
,SHR16(x
[i
+2],1),SHR16(x
[i
+2],1));
255 sum2
= MAC16_16(sum2
,SHR16(x
[i
+3],1),SHR16(x
[i
+3],1));
256 sum
= ADD32(sum
,SHR32(sum2
,6));
258 return SHL16(spx_sqrt(DIV32(sum
,len
)),4);
271 sum2
= MAC16_16(sum2
,SHL16(x
[i
],sig_shift
),SHL16(x
[i
],sig_shift
));
272 sum2
= MAC16_16(sum2
,SHL16(x
[i
+1],sig_shift
),SHL16(x
[i
+1],sig_shift
));
273 sum2
= MAC16_16(sum2
,SHL16(x
[i
+2],sig_shift
),SHL16(x
[i
+2],sig_shift
));
274 sum2
= MAC16_16(sum2
,SHL16(x
[i
+3],sig_shift
),SHL16(x
[i
+3],sig_shift
));
275 sum
= ADD32(sum
,SHR32(sum2
,6));
277 return SHL16(spx_sqrt(DIV32(sum
,len
)),3-sig_shift
);
281 #ifndef OVERRIDE_NORMALIZE16
282 int normalize16(const spx_sig_t
*x
, spx_word16_t
*y
, spx_sig_t max_scale
, int len
)
290 spx_sig_t tmp
= x
[i
];
298 while (max_val
>max_scale
)
305 y
[i
] = EXTRACT16(SHR32(x
[i
], sig_shift
));
313 spx_word16_t
compute_rms(const spx_sig_t
*x
, int len
)
321 return sqrt(.1+sum
/len
);
323 spx_word16_t
compute_rms16(const spx_word16_t
*x
, int len
)
325 return compute_rms(x
, len
);
331 #ifndef SPEEX_DISABLE_ENCODER
332 #ifndef OVERRIDE_FILTER_MEM16
333 void filter_mem16(const spx_word16_t
*x
, const spx_coef_t
*num
, const spx_coef_t
*den
, spx_word16_t
*y
, int N
, int ord
, spx_mem_t
*mem
, char *stack
)
336 spx_word16_t xi
,yi
,nyi
;
340 yi
= EXTRACT16(SATURATE(ADD32(EXTEND32(x
[i
]),PSHR32(mem
[0],LPC_SHIFT
)),32767));
342 for (j
=0;j
<ord
-1;j
++)
344 mem
[j
] = MAC16_16(MAC16_16(mem
[j
+1], num
[j
],xi
), den
[j
],nyi
);
346 mem
[ord
-1] = ADD32(MULT16_16(num
[ord
-1],xi
), MULT16_16(den
[ord
-1],nyi
));
351 #endif /* SPEEX_DISABLE_ENCODER */
353 #ifndef OVERRIDE_IIR_MEM16
354 void iir_mem16(const spx_word16_t
*x
, const spx_coef_t
*den
, spx_word16_t
*y
, int N
, int ord
, spx_mem_t
*mem
, char *stack
)
362 yi
= EXTRACT16(SATURATE(ADD32(EXTEND32(x
[i
]),PSHR32(mem
[0],LPC_SHIFT
)),32767));
364 for (j
=0;j
<ord
-1;j
++)
366 mem
[j
] = MAC16_16(mem
[j
+1],den
[j
],nyi
);
368 mem
[ord
-1] = MULT16_16(den
[ord
-1],nyi
);
374 #ifndef SPEEX_DISABLE_ENCODER
375 #ifndef OVERRIDE_FIR_MEM16
376 void fir_mem16(const spx_word16_t
*x
, const spx_coef_t
*num
, spx_word16_t
*y
, int N
, int ord
, spx_mem_t
*mem
, char *stack
)
384 yi
= EXTRACT16(SATURATE(ADD32(EXTEND32(x
[i
]),PSHR32(mem
[0],LPC_SHIFT
)),32767));
385 for (j
=0;j
<ord
-1;j
++)
387 mem
[j
] = MAC16_16(mem
[j
+1], num
[j
],xi
);
389 mem
[ord
-1] = MULT16_16(num
[ord
-1],xi
);
396 void syn_percep_zero16(const spx_word16_t
*xx
, const spx_coef_t
*ak
, const spx_coef_t
*awk1
, const spx_coef_t
*awk2
, spx_word16_t
*y
, int N
, int ord
, char *stack
)
399 VARDECL(spx_mem_t
*mem
);
400 ALLOC(mem
, ord
, spx_mem_t
);
403 iir_mem16(xx
, ak
, y
, N
, ord
, mem
, stack
);
406 filter_mem16(y
, awk1
, awk2
, y
, N
, ord
, mem
, stack
);
408 void residue_percep_zero16(const spx_word16_t
*xx
, const spx_coef_t
*ak
, const spx_coef_t
*awk1
, const spx_coef_t
*awk2
, spx_word16_t
*y
, int N
, int ord
, char *stack
)
411 VARDECL(spx_mem_t
*mem
);
412 ALLOC(mem
, ord
, spx_mem_t
);
415 filter_mem16(xx
, ak
, awk1
, y
, N
, ord
, mem
, stack
);
418 fir_mem16(y
, awk2
, y
, N
, ord
, mem
, stack
);
422 #ifndef OVERRIDE_COMPUTE_IMPULSE_RESPONSE
423 void compute_impulse_response(const spx_coef_t
*ak
, const spx_coef_t
*awk1
, const spx_coef_t
*awk2
, spx_word16_t
*y
, int N
, int ord
, char *stack
)
426 spx_word16_t y1
, ny1i
, ny2i
;
427 VARDECL(spx_mem_t
*mem1
);
428 VARDECL(spx_mem_t
*mem2
);
429 ALLOC(mem1
, ord
, spx_mem_t
);
430 ALLOC(mem2
, ord
, spx_mem_t
);
439 mem1
[i
] = mem2
[i
] = 0;
442 y1
= ADD16(y
[i
], EXTRACT16(PSHR32(mem1
[0],LPC_SHIFT
)));
444 y
[i
] = PSHR32(ADD32(SHL32(EXTEND32(y1
),LPC_SHIFT
+1),mem2
[0]),LPC_SHIFT
);
446 for (j
=0;j
<ord
-1;j
++)
448 mem1
[j
] = MAC16_16(mem1
[j
+1], awk2
[j
],ny1i
);
449 mem2
[j
] = MAC16_16(mem2
[j
+1], ak
[j
],ny2i
);
451 mem1
[ord
-1] = MULT16_16(awk2
[ord
-1],ny1i
);
452 mem2
[ord
-1] = MULT16_16(ak
[ord
-1],ny2i
);
457 /* Decomposes a signal into low-band and high-band using a QMF */
458 void qmf_decomp(const spx_word16_t
*xx
, const spx_word16_t
*aa
, spx_word16_t
*y1
, spx_word16_t
*y2
, int N
, int M
, spx_word16_t
*mem
, char *stack
)
461 VARDECL(spx_word16_t
*a
);
462 VARDECL(spx_word16_t
*x
);
465 ALLOC(a
, M
, spx_word16_t
);
466 ALLOC(x
, N
+M
-1, spx_word16_t
);
474 x
[i
+M
-1]=SHR16(xx
[i
],1);
476 mem
[i
]=SHR16(xx
[N
-i
-1],1);
477 for (i
=0,k
=0;i
<N
;i
+=2,k
++)
479 spx_word32_t y1k
=0, y2k
=0;
482 y1k
=ADD32(y1k
,MULT16_16(a
[j
],ADD16(x
[i
+j
],x2
[i
-j
])));
483 y2k
=SUB32(y2k
,MULT16_16(a
[j
],SUB16(x
[i
+j
],x2
[i
-j
])));
485 y1k
=ADD32(y1k
,MULT16_16(a
[j
],ADD16(x
[i
+j
],x2
[i
-j
])));
486 y2k
=ADD32(y2k
,MULT16_16(a
[j
],SUB16(x
[i
+j
],x2
[i
-j
])));
488 y1
[k
] = EXTRACT16(SATURATE(PSHR32(y1k
,15),32767));
489 y2
[k
] = EXTRACT16(SATURATE(PSHR32(y2k
,15),32767));
492 #endif /* SPEEX_DISABLE_ENCODER */
494 #ifndef OVERRIDE_QMF_SYNTH
495 /* Re-synthesised a signal from the QMF low-band and high-band signals */
496 void qmf_synth(const spx_word16_t
*x1
, const spx_word16_t
*x2
, const spx_word16_t
*a
, spx_word16_t
*y
, int N
, int M
, spx_word16_t
*mem1
, spx_word16_t
*mem2
, char *stack
)
498 all odd x[i] are zero -- well, actually they are left out of the array now
499 N and M are multiples of 4 */
504 VARDECL(spx_word16_t
*xx1
);
505 VARDECL(spx_word16_t
*xx2
);
509 ALLOC(xx1
, M2
+N2
, spx_word16_t
);
510 ALLOC(xx2
, M2
+N2
, spx_word16_t
);
512 for (i
= 0; i
< N2
; i
++)
514 for (i
= 0; i
< M2
; i
++)
515 xx1
[N2
+i
] = mem1
[2*i
+1];
516 for (i
= 0; i
< N2
; i
++)
518 for (i
= 0; i
< M2
; i
++)
519 xx2
[N2
+i
] = mem2
[2*i
+1];
521 for (i
= 0; i
< N2
; i
+= 2) {
522 spx_sig_t y0
, y1
, y2
, y3
;
523 spx_word16_t x10
, x20
;
525 y0
= y1
= y2
= y3
= 0;
529 for (j
= 0; j
< M2
; j
+= 2) {
530 spx_word16_t x11
, x21
;
539 /* We multiply twice by the same coef to avoid overflows */
540 y0
= MAC16_16(MAC16_16(y0
, a0
, x11
), NEG16(a0
), x21
);
541 y1
= MAC16_16(MAC16_16(y1
, a1
, x11
), a1
, x21
);
542 y2
= MAC16_16(MAC16_16(y2
, a0
, x10
), NEG16(a0
), x20
);
543 y3
= MAC16_16(MAC16_16(y3
, a1
, x10
), a1
, x20
);
545 y0
= ADD32(y0
,MULT16_16(a0
, x11
-x21
));
546 y1
= ADD32(y1
,MULT16_16(a1
, x11
+x21
));
547 y2
= ADD32(y2
,MULT16_16(a0
, x10
-x20
));
548 y3
= ADD32(y3
,MULT16_16(a1
, x10
+x20
));
556 /* We multiply twice by the same coef to avoid overflows */
557 y0
= MAC16_16(MAC16_16(y0
, a0
, x10
), NEG16(a0
), x20
);
558 y1
= MAC16_16(MAC16_16(y1
, a1
, x10
), a1
, x20
);
559 y2
= MAC16_16(MAC16_16(y2
, a0
, x11
), NEG16(a0
), x21
);
560 y3
= MAC16_16(MAC16_16(y3
, a1
, x11
), a1
, x21
);
562 y0
= ADD32(y0
,MULT16_16(a0
, x10
-x20
));
563 y1
= ADD32(y1
,MULT16_16(a1
, x10
+x20
));
564 y2
= ADD32(y2
,MULT16_16(a0
, x11
-x21
));
565 y3
= ADD32(y3
,MULT16_16(a1
, x11
+x21
));
569 y
[2*i
] = EXTRACT16(SATURATE32(PSHR32(y0
,15),32767));
570 y
[2*i
+1] = EXTRACT16(SATURATE32(PSHR32(y1
,15),32767));
571 y
[2*i
+2] = EXTRACT16(SATURATE32(PSHR32(y2
,15),32767));
572 y
[2*i
+3] = EXTRACT16(SATURATE32(PSHR32(y3
,15),32767));
574 /* Normalize up explicitly if we're in float */
582 for (i
= 0; i
< M2
; i
++)
583 mem1
[2*i
+1] = xx1
[i
];
584 for (i
= 0; i
< M2
; i
++)
585 mem2
[2*i
+1] = xx2
[i
];
/* 7-tap fractional-shift interpolation filters used by interp_pitch():
   rows are the +1/4, +1/2 (asymmetric) and -1/2 (mirrored) shifts.
   NOTE(review): the #if 0 variants are the older coefficient sets kept
   for reference — confirm against upstream before removing. */
#ifdef FIXED_POINT
#if 0
const spx_word16_t shift_filt[3][7] = {{-33, 1043, -4551, 19959, 19959, -4551, 1043},
                                       {-98, 1133, -4425, 29179, 8895, -2328, 444},
                                       {444, -2328, 8895, 29179, -4425, 1133, -98}};
#else
const spx_word16_t shift_filt[3][7] = {{-390, 1540, -4993, 20123, 20123, -4993, 1540},
                                       {-1064, 2817, -6694, 31589, 6837, -990, -209},
                                       {-209, -990, 6837, 31589, -6694, 2817, -1064}};
#endif
#else
#if 0
const float shift_filt[3][7] = {{-9.9369e-04, 3.1831e-02, -1.3889e-01, 6.0910e-01, 6.0910e-01, -1.3889e-01, 3.1831e-02},
                                {-0.0029937, 0.0345613, -0.1350474, 0.8904793, 0.2714479, -0.0710304, 0.0135403},
                                {0.0135403, -0.0710304, 0.2714479, 0.8904793, -0.1350474, 0.0345613, -0.0029937}};
#else
const float shift_filt[3][7] = {{-0.011915f, 0.046995f, -0.152373f, 0.614108f, 0.614108f, -0.152373f, 0.046995f},
                                {-0.0324855f, 0.0859768f, -0.2042986f, 0.9640297f, 0.2086420f, -0.0302054f, -0.0063646f},
                                {-0.0063646f, -0.0302054f, 0.2086420f, 0.9640297f, -0.2042986f, 0.0859768f, -0.0324855f}};
#endif
#endif
611 static int interp_pitch(
612 spx_word16_t
*exc
, /*decoded excitation*/
613 spx_word16_t
*interp
, /*decoded excitation*/
614 int pitch
, /*pitch period*/
619 spx_word32_t corr
[4][7];
620 spx_word32_t maxcorr
;
624 corr
[0][i
] = inner_prod(exc
, exc
-pitch
-3+i
, len
);
639 tmp
+= MULT16_32_Q15(shift_filt
[i
][k
],corr
[0][j
+k
-3]);
644 maxcorr
= corr
[0][0];
649 if (corr
[i
][j
] > maxcorr
)
651 maxcorr
= corr
[i
][j
];
659 spx_word32_t tmp
= 0;
664 tmp
+= MULT16_16(exc
[i
-(pitch
-maxj
+3)+k
-3],shift_filt
[maxi
-1][k
]);
667 tmp
= SHL32(exc
[i
-(pitch
-maxj
+3)],15);
669 interp
[i
] = PSHR32(tmp
,15);
675 spx_word16_t
*exc
, /*decoded excitation*/
676 spx_word16_t
*new_exc
, /*enhanced excitation*/
677 spx_coef_t
*ak
, /*LPC filter coefs*/
679 int nsf
, /*sub-frame size*/
680 int pitch
, /*pitch period*/
682 spx_word16_t comb_gain
, /*gain of comb filter*/
690 VARDECL(spx_word16_t
*iexc
);
691 spx_word16_t old_ener
, new_ener
;
694 spx_word16_t iexc0_mag
, iexc1_mag
, exc_mag
;
695 spx_word32_t corr0
, corr1
;
696 spx_word16_t gain0
, gain1
;
697 spx_word16_t pgain1
, pgain2
;
701 spx_word16_t gg1
, gg2
;
705 #if 0 /* Set to 1 to enable full pitch search */
707 spx_word16_t nol_pitch_coef
[6];
708 spx_word16_t ol_pitch_coef
;
709 open_loop_nbest_pitch(exc
, 20, 120, nsf
,
710 nol_pitch
, nol_pitch_coef
, 6, stack
);
711 corr_pitch
=nol_pitch
[0];
712 ol_pitch_coef
= nol_pitch_coef
[0];
713 /*Try to remove pitch multiples*/
717 if ((nol_pitch_coef
[i
]>MULT16_16_Q15(nol_pitch_coef
[0],19661)) &&
719 if ((nol_pitch_coef
[i
]>.6*nol_pitch_coef
[0]) &&
721 (ABS(2*nol_pitch
[i
]-corr_pitch
)<=2 || ABS(3*nol_pitch
[i
]-corr_pitch
)<=3 ||
722 ABS(4*nol_pitch
[i
]-corr_pitch
)<=4 || ABS(5*nol_pitch
[i
]-corr_pitch
)<=5))
724 corr_pitch
= nol_pitch
[i
];
731 ALLOC(iexc
, 2*nsf
, spx_word16_t
);
733 interp_pitch(exc
, iexc
, corr_pitch
, 80);
734 if (corr_pitch
>max_pitch
)
735 interp_pitch(exc
, iexc
+nsf
, 2*corr_pitch
, 80);
737 interp_pitch(exc
, iexc
+nsf
, -corr_pitch
, 80);
742 if (ABS16(exc
[i
])>16383)
751 exc
[i
] = SHR16(exc
[i
],1);
752 for (i
=0;i
<2*nsf
;i
++)
753 iexc
[i
] = SHR16(iexc
[i
],1);
756 /*interp_pitch(exc, iexc+2*nsf, 2*corr_pitch, 80);*/
758 /*printf ("%d %d %f\n", pitch, corr_pitch, max_corr*ener_1);*/
759 iexc0_mag
= spx_sqrt(1000+inner_prod(iexc
,iexc
,nsf
));
760 iexc1_mag
= spx_sqrt(1000+inner_prod(iexc
+nsf
,iexc
+nsf
,nsf
));
761 exc_mag
= spx_sqrt(1+inner_prod(exc
,exc
,nsf
));
762 corr0
= inner_prod(iexc
,exc
,nsf
);
765 corr1
= inner_prod(iexc
+nsf
,exc
,nsf
);
769 /* Doesn't cost much to limit the ratio and it makes the rest easier */
770 if (SHL32(EXTEND32(iexc0_mag
),6) < EXTEND32(exc_mag
))
771 iexc0_mag
= ADD16(1,PSHR16(exc_mag
,6));
772 if (SHL32(EXTEND32(iexc1_mag
),6) < EXTEND32(exc_mag
))
773 iexc1_mag
= ADD16(1,PSHR16(exc_mag
,6));
775 if (corr0
> MULT16_16(iexc0_mag
,exc_mag
))
776 pgain1
= QCONST16(1., 14);
778 pgain1
= PDIV32_16(SHL32(PDIV32(corr0
, exc_mag
),14),iexc0_mag
);
779 if (corr1
> MULT16_16(iexc1_mag
,exc_mag
))
780 pgain2
= QCONST16(1., 14);
782 pgain2
= PDIV32_16(SHL32(PDIV32(corr1
, exc_mag
),14),iexc1_mag
);
783 gg1
= PDIV32_16(SHL32(EXTEND32(exc_mag
),8), iexc0_mag
);
784 gg2
= PDIV32_16(SHL32(EXTEND32(exc_mag
),8), iexc1_mag
);
788 c1
= (MULT16_16_Q15(QCONST16(.4,15),comb_gain
)+QCONST16(.07,15));
789 c2
= QCONST16(.5,15)+MULT16_16_Q14(QCONST16(1.72,14),(c1
-QCONST16(.07,15)));
791 c1
= .4*comb_gain
+.07;
792 c2
= .5+1.72*(c1
-.07);
799 g1
= 32767 - MULT16_16_Q13(MULT16_16_Q15(c2
, pgain1
),pgain1
);
800 g2
= 32767 - MULT16_16_Q13(MULT16_16_Q15(c2
, pgain2
),pgain2
);
802 g1
= 1-c2
*pgain1
*pgain1
;
803 g2
= 1-c2
*pgain2
*pgain2
;
809 g1
= (spx_word16_t
)PDIV32_16(SHL32(EXTEND32(c1
),14),(spx_word16_t
)g1
);
810 g2
= (spx_word16_t
)PDIV32_16(SHL32(EXTEND32(c1
),14),(spx_word16_t
)g2
);
811 if (corr_pitch
>max_pitch
)
813 gain0
= MULT16_16_Q15(QCONST16(.7,15),MULT16_16_Q14(g1
,gg1
));
814 gain1
= MULT16_16_Q15(QCONST16(.3,15),MULT16_16_Q14(g2
,gg2
));
816 gain0
= MULT16_16_Q15(QCONST16(.6,15),MULT16_16_Q14(g1
,gg1
));
817 gain1
= MULT16_16_Q15(QCONST16(.6,15),MULT16_16_Q14(g2
,gg2
));
820 new_exc
[i
] = ADD16(exc
[i
], EXTRACT16(PSHR32(ADD32(MULT16_16(gain0
,iexc
[i
]), MULT16_16(gain1
,iexc
[i
+nsf
])),8)));
821 /* FIXME: compute_rms16 is currently not quite accurate enough (but close) */
822 new_ener
= compute_rms16(new_exc
, nsf
);
823 old_ener
= compute_rms16(exc
, nsf
);
829 if (old_ener
> new_ener
)
831 ngain
= PDIV32_16(SHL32(EXTEND32(old_ener
),14),new_ener
);
834 new_exc
[i
] = MULT16_16_Q14(ngain
, new_exc
[i
]);
839 exc
[i
] = SHL16(exc
[i
],1);
841 new_exc
[i
] = SHL16(SATURATE16(new_exc
[i
],16383),1);