Disabling the use of celt_fir() in silk_LPC_analysis_filter() by default

Jean-Marc Valin 2016-07-25 12:08:08 -04:00
parent 07ca5cc776
commit c75fa187fc


@@ -39,6 +39,13 @@ POSSIBILITY OF SUCH DAMAGE.
 /* first d output samples are set to zero */
 /*******************************************/
 
+/* OPT: Using celt_fir() for this function should be faster, but it may cause
+   integer overflows in intermediate values (not final results), which the
+   current implementation silences by casting to unsigned. Enabling this
+   should be safe in pretty much all cases, even though it is not technically
+   C89-compliant. */
+#define USE_CELT_FIR 0
+
 void silk_LPC_analysis_filter(
     opus_int16       *out,    /* O    Output signal */
     const opus_int16 *in,     /* I    Input signal  */
@@ -49,7 +56,7 @@ void silk_LPC_analysis_filter(
 )
 {
     opus_int j;
-#ifdef FIXED_POINT
+#if USE_CELT_FIR
     opus_int16 mem[ SILK_MAX_ORDER_LPC ];
     opus_int16 num[ SILK_MAX_ORDER_LPC ];
 #else
@@ -62,7 +69,7 @@ void silk_LPC_analysis_filter(
     silk_assert( (d & 1) == 0 );
     silk_assert( d <= len );
 
-#ifdef FIXED_POINT
+#if USE_CELT_FIR
     silk_assert( d <= SILK_MAX_ORDER_LPC );
     for( j = 0; j < d; j++ ) {
         num[ j ] = -B[ j ];
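
For context, the overflow-silencing idiom that the new comment describes can be sketched outside the diff. The following is a minimal, hypothetical example (mac16_16_wrap and fir_sample are illustrative names, not actual Opus functions) of how a fixed-point FIR multiply-accumulate loop can avoid undefined signed overflow in intermediate sums by accumulating on unsigned values. It assumes a two's-complement target, which is why the technique is safe in practice but not strictly guaranteed by C89:

#include <stdint.h>

/* Illustrative helper: accumulate a 16x16 product into a 32-bit sum.
   Signed overflow in the intermediate sum would be undefined behavior,
   so the addition is performed on unsigned values, where wraparound is
   well defined. Converting the wrapped result back to int32_t is
   implementation-defined rather than undefined, which is why the
   comment above calls this "not technically C89-compliant". */
static int32_t mac16_16_wrap( int32_t acc, int16_t a, int16_t b )
{
    /* The 16x16 product itself always fits in int32_t. */
    return (int32_t)( (uint32_t)acc + (uint32_t)( (int32_t)a * b ) );
}

/* Illustrative direct-form FIR accumulation over an order-d filter,
   in the spirit of a fixed-point celt_fir() inner loop. */
static int32_t fir_sample( const int16_t *x, const int16_t *num, int d )
{
    int32_t sum = 0;
    int     j;
    for( j = 0; j < d; j++ ) {
        sum = mac16_16_wrap( sum, num[ j ], x[ -j ] );
    }
    return sum;
}

As long as the mathematically exact final sum fits in 32 bits, any wraparound in intermediate steps cancels out on two's-complement hardware, which is what the comment means by overflows occurring "in intermediate values (not final results)".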