From d4046036a90c8b3931d2eac91b71ec14b7ba8053 Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin
Date: Sat, 24 Nov 2018 11:32:01 -0500
Subject: [PATCH] Dump Conv1D (didn't check weight ordering at all)

---
 dnn/dump_lpcnet.py | 25 +++++++++++++++++--------
 dnn/nnet.c         | 13 ++++++++-----
 dnn/nnet.h         |  2 ++
 3 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/dnn/dump_lpcnet.py b/dnn/dump_lpcnet.py
index b1047460..bda61633 100755
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -64,10 +64,7 @@ def dump_dense_layer(self, f, hf):
     weights = self.get_weights()
     printVector(f, weights[0], name + '_weights')
     printVector(f, weights[-1], name + '_bias')
-    if hasattr(self, 'activation'):
-        activation = self.activation.__name__.upper()
-    else:
-        activation = 'TANH'
+    activation = self.activation.__name__.upper()
     f.write('const DenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, ACTIVATION_{}\n}};\n\n'
             .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
@@ -82,10 +79,7 @@ def dump_mdense_layer(self, f, hf):
     printVector(f, weights[0], name + '_weights')
     printVector(f, weights[1], name + '_bias')
     printVector(f, weights[1], name + '_factor')
-    if hasattr(self, 'activation'):
-        activation = self.activation.__name__.upper()
-    else:
-        activation = 'TANH'
+    activation = self.activation.__name__.upper()
     f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, ACTIVATION_{}\n}};\n\n'
             .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
@@ -93,6 +87,21 @@
     return False
 MDense.dump_layer = dump_mdense_layer
 
+def dump_conv1d_layer(self, f, hf):
+    name = self.name
+    print("printing layer " + name + " of type " + self.__class__.__name__)
+    weights = self.get_weights()
+    printVector(f, weights[0], name + '_weights')
+    printVector(f, weights[-1], name + '_bias')
+    activation = self.activation.__name__.upper()
+    f.write('const Conv1DLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
+            .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
+    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
+    hf.write('extern const Conv1DLayer {};\n\n'.format(name));
+    return False
+Conv1D.dump_layer = dump_conv1d_layer
+
+
 def dump_embedding_layer(self, f, hf):
     name = self.name
     print("printing layer " + name + " of type " + self.__class__.__name__)
diff --git a/dnn/nnet.c b/dnn/nnet.c
index 0db611e5..c62a5984 100644
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -128,6 +128,10 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
    compute_activation(output, output, N, layer->activation);
 }
 
+void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
+{
+}
+
 void compute_gru(const GRULayer *gru, float *state, const float *input)
 {
    int i;
@@ -146,16 +150,14 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
       z[i] = gru->bias[i];
    gemm_accum(z, gru->input_weights, N, M, stride, input);
    gemm_accum(z, gru->recurrent_weights, N, N, stride, state);
-   for (i=0;i<N;i++)
-      z[i] = sigmoid_approx(WEIGHTS_SCALE*z[i]);
+   compute_activation(z, z, N, ACTIVATION_SIGMOID);
 
    for (i=0;i<N;i++)
       r[i] = gru->bias[N + i];
    gemm_accum(r, &gru->input_weights[N], N, M, stride, input);
    gemm_accum(r, &gru->recurrent_weights[N], N, N, stride, state);
-   for (i=0;i<N;i++)
-      r[i] = sigmoid_approx(WEIGHTS_SCALE*r[i]);
+   compute_activation(r, r, N, ACTIVATION_SIGMOID);
 
    gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input);
    gemm_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp);
 }
+   compute_activation(h, h, N, gru->activation);
    for (i=0;i