diff --git a/dnn/dump_lpcnet.py b/dnn/dump_lpcnet.py
index 8b0d25c2..936074c7 100755
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -115,7 +115,7 @@ def dump_embedding_layer(self, f, hf):
 
 Embedding.dump_layer = dump_embedding_layer
 
-model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640, use_gpu=False)
+model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)
 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
 #model.summary()
 
diff --git a/dnn/nnet.c b/dnn/nnet.c
index 1e9333cb..9c567687 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -210,11 +210,9 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
    int stride;
    float tmp[MAX_CONV_INPUTS];
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
-   M = layer->nb_inputs;
-   N = layer->nb_neurons;
    RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
    RNN_COPY(tmp, input, layer->nb_inputs);
-   M = layer->nb_inputs;
+   M = layer->nb_inputs*layer->kernel_size;
    N = layer->nb_neurons;
    stride = N;
    for (i=0;i<N;i++)
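
Note on the nnet.c hunk: M is the input dimension handed to the dense matrix product inside compute_conv1d, so it has to cover the whole kernel_size-frame window stored in tmp (nb_inputs values per frame), not a single frame; the old value of layer->nb_inputs made the layer accumulate only one frame's worth of inputs. The following is a minimal, self-contained sketch of that dimension logic, not LPCNet's actual code: ToyConv1D and toy_conv1d are hypothetical stand-ins for Conv1DLayer and gemm_accum, and the assumed weight layout (weights[j*N + i], i.e. a stride of N between input rows) is only an assumption based on the "stride = N" line in the hunk.

#include <stdio.h>

/* Hypothetical, simplified stand-in for LPCNet's Conv1DLayer: one 1D
 * convolution step expressed as a dense matrix-vector product over a
 * window of kernel_size input frames. */
typedef struct {
   int nb_inputs;    /* features per input frame */
   int nb_neurons;   /* output channels */
   int kernel_size;  /* frames in the convolution window */
} ToyConv1D;

/* window holds nb_inputs*kernel_size values (kernel_size frames back to
 * back); weights is (nb_inputs*kernel_size) x nb_neurons with a stride of
 * nb_neurons between input rows. */
static void toy_conv1d(const ToyConv1D *layer, float *output,
                       const float *weights, const float *bias,
                       const float *window)
{
   int i, j;
   int M = layer->nb_inputs*layer->kernel_size;  /* the corrected dimension */
   int N = layer->nb_neurons;
   for (i=0;i<N;i++)
      output[i] = bias[i];
   for (j=0;j<M;j++)
      for (i=0;i<N;i++)
         output[i] += weights[j*N + i]*window[j];
}

int main(void)
{
   /* 2 features per frame, 3-frame kernel, 1 output neuron: the output
    * must depend on all 2*3 = 6 window values. */
   ToyConv1D layer = { 2, 1, 3 };
   float weights[6] = { 1, 1, 1, 1, 1, 1 };
   float bias[1] = { 0 };
   float window[6] = { 1, 2, 3, 4, 5, 6 };
   float out[1];
   toy_conv1d(&layer, out, weights, bias, window);
   printf("conv1d output: %g\n", out[0]);  /* 21 with M = 6 */
   return 0;
}

Run as-is, the sketch prints 21 (the sum of all six window values); if M were left at nb_inputs it would accumulate only the first frame and print 3, which is the kind of truncated dot product the patch corrects.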