celt_lpc: avoid overflows when computing lpcs in fixed point

The LPCs are computed in 32 bits, so increase the allowed coefficient range from +/-8
to +/-64 to avoid overflows caught during fuzzing. Before shifting back down to the
+/-8 range of the final 16-bit output, apply bandwidth expansion to avoid any
further overflow issues.
Felicia Lim 2021-02-22 22:29:14 -08:00
parent 16286a25fd
commit 7b05f44f4b
5 changed files with 108 additions and 7 deletions
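
For reference, here is a rough sketch of the fixed-point bookkeeping behind the message above. The Q-format names (Q28, Q25, Q12) are inferred from the shift amounts and the "Q25->Q12" comments in the diffs below rather than stated in the commit, so treat this as an illustration only:

/* Hedged sketch: headroom of a 32-bit LPC coefficient before and after
   the change, plus the 16-bit output range.  The formats are assumptions
   inferred from SHR32(r,3) vs SHR32(r,6) and the "Q25->Q12" comments. */
#include <stdio.h>

int main(void)
{
   /* Old: lpc[i] = SHR32(r,3) keeps the 32-bit LPCs in roughly Q28,
      so they can only hold values in about +/-8. */
   double old_range = 2147483647.0 / (1 << 28);   /* ~8 */

   /* New: lpc[i] = SHR32(r,6) keeps them in roughly Q25,
      giving +/-64 of headroom for the intermediate computation. */
   double new_range = 2147483647.0 / (1 << 25);   /* ~64 */

   /* Output: PSHR32(lpc[i],13) converts Q25 to Q12, i.e. back to the
      +/-8 range of the final 16-bit coefficients (32767/4096 ~ 8). */
   double out_range = 32767.0 / (1 << 12);        /* ~8 */

   printf("old %.1f  new %.1f  output %.1f\n", old_range, new_range, out_range);
   return 0;
}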

View file

@@ -57,10 +57,10 @@ int p
          opus_val32 rr = 0;
          for (j = 0; j < i; j++)
             rr += MULT32_32_Q31(lpc[j],ac[i - j]);
-         rr += SHR32(ac[i + 1],3);
-         r = -frac_div32(SHL32(rr,3), error);
+         rr += SHR32(ac[i + 1],6);
+         r = -frac_div32(SHL32(rr,6), error);
          /* Update LPC coefficients and total error */
-         lpc[i] = SHR32(r,3);
+         lpc[i] = SHR32(r,6);
          for (j = 0; j < (i+1)>>1; j++)
          {
             opus_val32 tmp1, tmp2;
@@ -82,8 +82,52 @@ int p
       }
    }
 #ifdef FIXED_POINT
-   for (i=0;i<p;i++)
-      _lpc[i] = ROUND16(lpc[i],16);
+   {
+      /* Convert the int32 lpcs to int16 and ensure there are no wrap-arounds.
+         This reuses the logic in silk_LPC_fit() and silk_bwexpander_32(). Any bug
+         fixes should also be applied there. */
+      int iter, idx = 0;
+      opus_val32 maxabs, absval, chirp_Q16, chirp_minus_one_Q16;
+
+      for (iter = 0; iter < 10; iter++) {
+         maxabs = 0;
+         for (i = 0; i < p; i++) {
+            absval = ABS32(lpc[i]);
+            if (absval > maxabs) {
+               maxabs = absval;
+               idx = i;
+            }
+         }
+         maxabs = PSHR32(maxabs, 13);  /* Q25->Q12 */
+
+         if (maxabs > 32767) {
+            maxabs = MIN32(maxabs, 163838);
+            chirp_Q16 = QCONST32(0.999, 16) - DIV32(SHL32(maxabs - 32767, 14),
+                                                    SHR32(MULT32_32_32(maxabs, idx + 1), 2));
+            chirp_minus_one_Q16 = chirp_Q16 - 65536;
+
+            /* Apply bandwidth expansion. */
+            for (i = 0; i < p - 1; i++) {
+               lpc[i] = MULT32_32_Q16(chirp_Q16, lpc[i]);
+               chirp_Q16 += PSHR32(MULT32_32_32(chirp_Q16, chirp_minus_one_Q16), 16);
+            }
+            lpc[p - 1] = MULT32_32_Q16(chirp_Q16, lpc[p - 1]);
+         } else {
+            break;
+         }
+      }
+
+      if (iter == 10) {
+         /* If the coeffs still do not fit into the 16 bit range after 10 iterations,
+            fall back to the A(z)=1 filter. */
+         OPUS_CLEAR(lpc, p);
+         _lpc[0] = 4096;  /* Q12 */
+      } else {
+         for (i = 0; i < p; i++) {
+            _lpc[i] = EXTRACT16(PSHR32(lpc[i], 13));  /* Q25->Q12 */
+         }
+      }
+   }
 #endif
 }
 
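The loop above mirrors silk_bwexpander_32(): coefficient i is scaled by chirp^(i+1), which pulls the poles of A(z) toward the origin and shrinks the coefficient magnitudes until they fit in 16 bits. A minimal floating-point sketch of that reference behaviour (the helper name and the float formulation are illustrative, not part of the patch):

/* Hedged sketch: floating-point reference for the fixed-point chirp loop.
   Each coefficient is scaled by chirp^(i+1), i.e. A(z) becomes A(z/chirp). */
#include <stdio.h>

static void bwexpand_float(float *a, int d, float chirp)
{
   float c = chirp;
   int i;
   for (i = 0; i < d; i++) {
      a[i] *= c;       /* a[i] *= chirp^(i+1) */
      c *= chirp;
   }
}

int main(void)
{
   float a[4] = { 9.5f, -7.25f, 3.0f, -1.5f };
   int i;
   bwexpand_float(a, 4, 0.95f);
   for (i = 0; i < 4; i++)
      printf("%f\n", a[i]);
   return 0;
}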

View file

@@ -410,6 +410,51 @@ static OPUS_INLINE short MULT16_16_16(int a, int b)
    return res;
 }
 
+/* result fits in 32 bits */
+static OPUS_INLINE int MULT32_32_32(opus_int64 a, opus_int64 b)
+{
+   opus_int64 res;
+   if (!VERIFY_INT(a) || !VERIFY_INT(b))
+   {
+      fprintf (stderr, "MULT32_32_32: inputs are not int: %d %d\n", a, b);
+#ifdef FIXED_DEBUG_ASSERT
+      celt_assert(0);
+#endif
+   }
+   res = a*b;
+   if (!VERIFY_INT(res))
+   {
+      fprintf (stderr, "MULT32_32_32: output is not int: %d\n", res);
+#ifdef FIXED_DEBUG_ASSERT
+      celt_assert(0);
+#endif
+   }
+   celt_mips+=5;
+   return res;
+}
+
+static OPUS_INLINE int MULT32_32_Q16(opus_int64 a, opus_int64 b)
+{
+   opus_int64 res;
+   if (!VERIFY_INT(a) || !VERIFY_INT(b))
+   {
+      fprintf (stderr, "MULT32_32_Q16: inputs are not int: %d %d\n", a, b);
+#ifdef FIXED_DEBUG_ASSERT
+      celt_assert(0);
+#endif
+   }
+   res = ((opus_int64)(a)*(opus_int64)(b)) >> 16;
+   if (!VERIFY_INT(res))
+   {
+      fprintf (stderr, "MULT32_32_Q16: output is not int: %d*%d=%d\n", a, b, (int)res);
+#ifdef FIXED_DEBUG_ASSERT
+      celt_assert(0);
+#endif
+   }
+   celt_mips+=5;
+   return res;
+}
+
 #define MULT16_16(a, b) MULT16_16_(a, b, __FILE__, __LINE__)
 static OPUS_INLINE int MULT16_16_(int a, int b, char *file, int line)
 {
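These debug shims mirror the new MULT32_32_32/MULT32_32_Q16 macros and flag results that no longer fit in 32 bits. A small standalone illustration of the failure mode they guard against, assuming VERIFY_INT() checks the 32-bit range (the operand values below are made up for the example):

/* Hedged sketch: why the 32x32 product is formed in 64 bits.  Two large
   operands already overflow a 32-bit intermediate before the >>16;
   only the shifted result has to fit back into 32 bits. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
   int32_t a = 1040000;                    /* illustrative large operands */
   int32_t b = 1120000;
   int64_t wide = (int64_t)a * b;          /* needs ~41 bits */
   int64_t q16  = wide >> 16;              /* ~17.8M: fits in 32 bits again */
   printf("wide=%lld q16=%lld fits=%d\n", (long long)wide, (long long)q16,
          q16 >= INT32_MIN && q16 <= INT32_MAX);
   return 0;
}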

View file

@@ -57,6 +57,13 @@
 #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000ffff)),15))
 #endif
 
+/** 32x32 multiplication, followed by a 16-bit shift right. Results fits in 32 bits */
+#if OPUS_FAST_INT64
+#define MULT32_32_Q16(a,b) ((opus_val32)SHR((opus_int64)(a)*(opus_int64)(b),16))
+#else
+#define MULT32_32_Q16(a,b) (ADD32(ADD32(ADD32((opus_val32)(SHR32(((opus_uint32)((a)&0x0000ffff)*(opus_uint32)((b)&0x0000ffff)),16)), MULT16_16SU(SHR32(a,16),((b)&0x0000ffff))), MULT16_16SU(SHR32(b,16),((a)&0x0000ffff))), SHL32(MULT16_16(SHR32(a,16),SHR32(b,16)),16)))
+#endif
+
 /** 32x32 multiplication, followed by a 31-bit shift right. Results fits in 32 bits */
 #if OPUS_FAST_INT64
 #define MULT32_32_Q31(a,b) ((opus_val32)SHR((opus_int64)(a)*(opus_int64)(b),31))
@@ -131,6 +138,9 @@
 /** 16x16 multiplication where the result fits in 16 bits */
 #define MULT16_16_16(a,b) ((((opus_val16)(a))*((opus_val16)(b))))
 
+/** 32x32 multiplication where the result fits in 32 bits */
+#define MULT32_32_32(a,b) ((((opus_val32)(a))*((opus_val32)(b))))
+
 /* (opus_val32)(opus_val16) gives TI compiler a hint that it's 16x16->32 multiply */
 /** 16x16 multiplication where the result fits in 32 bits */
 #define MULT16_16(a,b) (((opus_val32)(opus_val16)(a))*((opus_val32)(opus_val16)(b)))
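The non-OPUS_FAST_INT64 form of MULT32_32_Q16 above rebuilds the 64-bit product from 16x16 partial products. A standalone sanity check of that decomposition against the plain 64-bit formulation (written in portable C outside the Opus build; the helper names are made up for illustration):

/* Hedged sketch: the partial-product expansion used by the generic
   MULT32_32_Q16 macro, checked against the obvious 64-bit version.
   Valid whenever the true Q16 result fits in 32 bits, as the macro assumes. */
#include <stdint.h>
#include <stdio.h>

static int32_t q16_ref(int32_t a, int32_t b)
{
   return (int32_t)(((int64_t)a * b) >> 16);
}

static int32_t q16_parts(int32_t a, int32_t b)
{
   uint32_t al = (uint32_t)a & 0xffff;   /* unsigned low halves */
   uint32_t bl = (uint32_t)b & 0xffff;
   int32_t  ah = a >> 16;                /* signed high halves (arithmetic shift) */
   int32_t  bh = b >> 16;
   uint32_t acc = (al * bl) >> 16;              /* top half of low*low */
   acc += (uint32_t)(ah * (int32_t)bl);         /* signed*unsigned cross terms */
   acc += (uint32_t)(bh * (int32_t)al);
   acc += ((uint32_t)ah * (uint32_t)bh) << 16;  /* high*high, shifted up */
   return (int32_t)acc;   /* two's-complement wrap, like the macro itself */
}

int main(void)
{
   int32_t a = -123456789, b = 987654;
   printf("ref=%d parts=%d\n", q16_ref(a, b), q16_parts(a, b));
   return 0;
}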

View file

@@ -31,7 +31,8 @@ POSSIBILITY OF SUCH DAMAGE.
 
 #include "SigProc_FIX.h"
 
-/* Convert int32 coefficients to int16 coefs and make sure there's no wrap-around */
+/* Convert int32 coefficients to int16 coefs and make sure there's no wrap-around.
+   This logic is reused in _celt_lpc(). Any bug fixes should also be applied there. */
 void silk_LPC_fit(
     opus_int16 *a_QOUT, /* O Output signal */
     opus_int32 *a_QIN, /* I/O Input signal */

View file

@@ -31,7 +31,8 @@ POSSIBILITY OF SUCH DAMAGE.
 
 #include "SigProc_FIX.h"
 
-/* Chirp (bandwidth expand) LP AR filter */
+/* Chirp (bandwidth expand) LP AR filter.
+   This logic is reused in _celt_lpc(). Any bug fixes should also be applied there. */
 void silk_bwexpander_32(
     opus_int32 *ar, /* I/O AR filter to be expanded (without leading 1) */
     const opus_int d, /* I Length of ar */