From 538f25565a03989540f49e7a9a97a07cb29ee55a Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin
Date: Mon, 26 Nov 2018 16:02:49 -0500
Subject: [PATCH] Starting to actually test this -- fix a few OOB reads

---
 dnn/dump_lpcnet.py |  2 +-
 dnn/lpcnet.c       | 19 +++++++++++++++++++
 dnn/nnet.c         |  4 ++--
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/dnn/dump_lpcnet.py b/dnn/dump_lpcnet.py
index 23b41fa5..61f146fb 100755
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -116,7 +116,7 @@ def dump_mdense_layer(self, f, hf):
     activation = self.activation.__name__.upper()
     max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])
     f.write('const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
-            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
+            .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
     return False
diff --git a/dnn/lpcnet.c b/dnn/lpcnet.c
index 5b02cecd..94648130 100644
--- a/dnn/lpcnet.c
+++ b/dnn/lpcnet.c
@@ -25,6 +25,7 @@
 */
 
 #include <math.h>
+#include <stdio.h>
 #include "nnet_data.h"
 #include "nnet.h"
 #include "common.h"
@@ -150,3 +151,21 @@ void lpcnet_synthesize(LPCNetState *lpcnet, short *output, const float *features
       lpcnet->last_exc = exc;
    }
 }
+
+#if 1
+#define FRAME_SIZE 160
+int main(int argc, char **argv) {
+   LPCNetState *net;
+   net = lpcnet_create();
+   while (1) {
+      float features[NB_FEATURES];
+      short pcm[FRAME_SIZE];
+      fread(features, sizeof(features[0]), NB_FEATURES, stdin);
+      if (feof(stdin)) break;
+      lpcnet_synthesize(net, pcm, features, FRAME_SIZE);
+      fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, stdout);
+   }
+   lpcnet_destroy(net);
+   return 0;
+}
+#endif
diff --git a/dnn/nnet.c b/dnn/nnet.c
index e8e621d1..33614074 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -217,13 +217,13 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const f
    celt_assert(input != output);
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
    RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
-   RNN_COPY(tmp, input, layer->nb_inputs);
+   RNN_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
    M = layer->nb_inputs*layer->kernel_size;
    N = layer->nb_neurons;
    stride = N;
    for (i=0;i<N;i++)
       output[i] = layer->bias[i];
-   gemm_accum(output, layer->input_weights, N, M, stride, input);
+   gemm_accum(output, layer->input_weights, N, M, stride, tmp);
    compute_activation(output, output, N, layer->activation);
    RNN_COPY(mem, &tmp[layer->nb_inputs], layer->nb_inputs*(layer->kernel_size-1));
 }
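
Testing note (not part of the patch): the driver added to lpcnet.c reads one
frame of NB_FEATURES raw 32-bit floats from stdin per loop iteration and
writes FRAME_SIZE (160) 16-bit PCM samples per frame to stdout. A minimal
sketch of a feeder program is below; the NB_FEATURES value (38 is a guess --
use whatever the generated nnet_data.h defines) and the program names are
illustrative assumptions, not something this patch provides.

/* feeder.c -- hypothetical helper: emits all-zero feature frames so the
 * synthesis loop above can be exercised end to end. */
#include <stdio.h>

#define NB_FEATURES 38   /* assumed -- must match the LPCNet build's headers */
#define NB_FRAMES   100  /* 100 x 10 ms frames = ~1 s of audio at 16 kHz */

int main(void) {
   float features[NB_FEATURES] = {0};
   int i;
   for (i = 0; i < NB_FRAMES; i++) {
      if (fwrite(features, sizeof(features[0]), NB_FEATURES, stdout) != NB_FEATURES)
         return 1;
   }
   return 0;
}

With hypothetical binary names, the pipeline is then:

   ./feeder | ./lpcnet_test > out.raw

which yields raw 16-bit mono PCM at 16 kHz in out.raw.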