From 66486004ba1b2c44d53cd58b8346f9d78480b19f Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin <jmvalin@jmvalin.ca>
Date: Sat, 24 Nov 2018 12:23:11 -0500
Subject: [PATCH] Implement MDense

---
 dnn/dump_lpcnet.py |  4 ++--
 dnn/nnet.c         | 23 +++++++++++++++++++++--
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/dnn/dump_lpcnet.py b/dnn/dump_lpcnet.py
index bda61633..8b0d25c2 100755
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -80,8 +80,8 @@ def dump_mdense_layer(self, f, hf):
     printVector(f, weights[1], name + '_bias')
     printVector(f, weights[1], name + '_factor')
     activation = self.activation.__name__.upper()
-    f.write('const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
-            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
+    f.write('const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
+            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
     return False
diff --git a/dnn/nnet.c b/dnn/nnet.c
index c62a5984..1e9333cb 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -118,7 +118,6 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
    int i;
    int N, M;
    int stride;
-   celt_assert(layer->nb_neurons <= MAX_NEURONS);
    M = layer->nb_inputs;
    N = layer->nb_neurons;
    stride = N;
@@ -130,6 +129,27 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
 
 void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
 {
+   int i, c;
+   int N, M, C;
+   int stride;
+   M = layer->nb_inputs;
+   N = layer->nb_neurons;
+   C = layer->nb_channels;
+   /* FIXME: Make this C90. */
+   float tmp[N*C];
+   stride = N*C;
+   for (i=0;i<N*C;i++)
+      tmp[i] = layer->bias[i];
+   gemm_accum(tmp, layer->input_weights, N*C, M, stride, input);
+   compute_activation(tmp, tmp, N*C, ACTIVATION_TANH);
+   for (i=0;i<N;i++)
+      output[i] = 0;
+   for (c=0;c<C;c++)
+   {
+      for (i=0;i<N;i++)
+         output[i] += tmp[c*N + i]*layer->factor[c*N + i];
+   }
+   compute_activation(output, output, N, layer->activation);
 }
 
 void compute_gru(const GRULayer *gru, float *state, const float *input)
@@ -189,7 +209,6 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const f
    int N, M;
    int stride;
    float tmp[MAX_CONV_INPUTS];
-   celt_assert(layer->nb_neurons <= MAX_NEURONS);
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
    M = layer->nb_inputs;
    N = layer->nb_neurons;
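
Note on the FIXME in compute_mdense(): "float tmp[N*C];" is a C99 variable-length
array, and it is also declared after the first statements, both of which C90
forbids. Below is a minimal sketch of one way to make the function C90-clean,
assuming a hypothetical MAX_MDENSE_TMP bound on N*C (that constant and the
corresponding assert are not part of this patch); the computation itself is
unchanged from the patched function.

/* Hypothetical compile-time bound on N*C; not defined anywhere in this patch. */
#define MAX_MDENSE_TMP 4096

void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
{
   int i, c;
   int N, M, C;
   int stride;
   float tmp[MAX_MDENSE_TMP]; /* fixed-size buffer replaces the C99 VLA */
   M = layer->nb_inputs;
   N = layer->nb_neurons;
   C = layer->nb_channels;
   celt_assert(N*C <= MAX_MDENSE_TMP);
   stride = N*C;
   /* tmp = tanh(W*input + b): C candidate activations per neuron. */
   for (i=0;i<N*C;i++)
      tmp[i] = layer->bias[i];
   gemm_accum(tmp, layer->input_weights, N*C, M, stride, input);
   compute_activation(tmp, tmp, N*C, ACTIVATION_TANH);
   /* Blend the C channels of each neuron with the learned factors. */
   for (i=0;i<N;i++)
      output[i] = 0;
   for (c=0;c<C;c++)
   {
      for (i=0;i<N;i++)
         output[i] += tmp[c*N + i]*layer->factor[c*N + i];
   }
   compute_activation(output, output, N, layer->activation);
}

The fixed-size buffer trades a little stack space for portability to C90
compilers; the assert keeps the silent-overflow failure mode of the VLA from
becoming a silent out-of-bounds write.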