Work in progress translation to C

Jean-Marc Valin 2018-11-23 19:43:58 -05:00
parent 8caaa5e917
commit b9cd61be8b
4 changed files with 482 additions and 0 deletions

dnn/dump_lpcnet.py Executable file

@@ -0,0 +1,130 @@
#!/usr/bin/python3
import lpcnet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Layer, GRU, CuDNNGRU, Dense, Conv1D, Embedding
from ulaw import ulaw2lin, lin2ulaw
from mdense import MDense
import keras.backend as K
import h5py
import re
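
# Write a float array as a C 'static const float' initializer.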
def printVector(f, vector, name):
    v = np.reshape(vector, (-1))
    #print('static const float ', name, '[', len(v), '] = \n', file=f)
    f.write('static const float {}[{}] = {{\n   '.format(name, len(v)))
    for i in range(0, len(v)):
        f.write('{}'.format(v[i]))
        if (i!=len(v)-1):
            f.write(',')
        else:
            break
        if (i%8==7):
            f.write("\n   ")
        else:
            f.write(" ")
    #print(v, file=f)
    f.write('\n};\n\n')
    return
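
# Each Keras layer class gets a dump_layer() method below. It writes the
# layer to the generated C source and header, and returns True when the
# layer carries recurrent state that needs a slot in the RNNState struct.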
def dump_layer_ignore(self, f, hf):
    print("ignoring layer " + self.name + " of type " + self.__class__.__name__)
    return False
Layer.dump_layer = dump_layer_ignore
def dump_gru_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    printVector(f, weights[0], name + '_weights')
    printVector(f, weights[1], name + '_recurrent_weights')
    printVector(f, weights[-1], name + '_bias')
    #activation = re.search('function (.*) at', str(layer.activation)).group(1).upper()
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'
    if hasattr(self, 'reset_after'):
        # Convert the Python bool to 0/1 so the generated C compiles.
        reset_after = 1 if self.reset_after else 0
    else:
        reset_after = 1
    f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}, {}\n}};\n\n'
            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation, reset_after))
    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('extern const GRULayer {};\n\n'.format(name))
    return True
CuDNNGRU.dump_layer = dump_gru_layer
GRU.dump_layer = dump_gru_layer
def dump_dense_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    printVector(f, weights[0], name + '_weights')
    printVector(f, weights[-1], name + '_bias')
    #activation = re.search('function (.*) at', str(layer.activation)).group(1).upper()
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'
    f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
    hf.write('extern const DenseLayer {};\n\n'.format(name))
    return False
Dense.dump_layer = dump_dense_layer
def dump_mdense_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    printVector(f, weights[0], name + '_weights')
    printVector(f, weights[1], name + '_bias')
    # The factor is the third weight tensor, not the bias again.
    printVector(f, weights[2], name + '_factor')
    #activation = re.search('function (.*) at', str(layer.activation)).group(1).upper()
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'
    # Assumes the kernel is (inputs, neurons, channels); the third axis
    # fills the MDenseLayer nb_channels field declared in nnet.h.
    f.write('const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
    hf.write('extern const MDenseLayer {};\n\n'.format(name))
    return False
MDense.dump_layer = dump_mdense_layer
model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

model.load_weights(sys.argv[1])

f = open(sys.argv[2], 'w')
hf = open(sys.argv[3], 'w')

f.write('/*This file is automatically generated from a Keras model*/\n\n')
f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "nnet.h"\n#include "{}"\n\n'.format(sys.argv[3]))

hf.write('/*This file is automatically generated from a Keras model*/\n\n')
hf.write('#ifndef RNN_DATA_H\n#define RNN_DATA_H\n\n#include "nnet.h"\n\n')

layer_list = []
for i, layer in enumerate(model.layers):
    if layer.dump_layer(f, hf):
        layer_list.append(layer.name)

hf.write('struct RNNState {\n')
for i, name in enumerate(layer_list):
    hf.write('  float {}_state[{}_SIZE];\n'.format(name, name.upper()))
hf.write('};\n')

hf.write('\n\n#endif\n')

f.close()
hf.close()
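
A usage sketch (the file names here are hypothetical; the argument order is
model weights, generated C source, generated header):

./dump_lpcnet.py lpcnet_weights.h5 nnet_data.c nnet_data.h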

dnn/nnet.c Normal file

@@ -0,0 +1,213 @@
/* Copyright (c) 2018 Mozilla
2008-2011 Octasic Inc.
2012-2017 Jean-Marc Valin */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <math.h>
#include "opus_types.h"
#include "arch.h"
#include "common.h"
#include "tansig_table.h"
#include "nnet.h"
static OPUS_INLINE float tansig_approx(float x)
{
   int i;
   float y, dy;
   float sign=1;
   /* Tests are reversed to catch NaNs */
   if (!(x<8))
      return 1;
   if (!(x>-8))
      return -1;
#ifndef FIXED_POINT
   /* Another check in case of -ffast-math */
   if (celt_isnan(x))
      return 0;
#endif
   if (x<0)
   {
      x=-x;
      sign=-1;
   }
   i = (int)floor(.5f+25*x);
   x -= .04f*i;
   y = tansig_table[i];
   dy = 1-y*y;
   y = y + x*dy*(1 - y*x);
   return sign*y;
}
static OPUS_INLINE float sigmoid_approx(float x)
{
   return .5f + .5f*tansig_approx(.5f*x);
}

static OPUS_INLINE float relu(float x)
{
   return x < 0 ? 0 : x;
}
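
/* Dense matrix-vector accumulate: out += weights*x, where the rows-by-cols
   weight matrix is stored column by column, col_stride floats apart. */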
static void gemm_accum(float *out, const float *weights, int rows, int cols, int col_stride, const float *x)
{
   int i, j;
   for (i=0;i<rows;i++)
   {
      for (j=0;j<cols;j++)
         out[i] += weights[j*col_stride + i]*x[j];
   }
}
void compute_activation(float *output, float *input, int N, int activation)
{
   int i;
   if (activation == ACTIVATION_SIGMOID) {
      for (i=0;i<N;i++)
         output[i] = sigmoid_approx(input[i]);
   } else if (activation == ACTIVATION_TANH) {
      for (i=0;i<N;i++)
         output[i] = tansig_approx(input[i]);
   } else if (activation == ACTIVATION_RELU) {
      for (i=0;i<N;i++)
         output[i] = relu(input[i]);
   } else if (activation == ACTIVATION_SOFTMAX) {
      float sum = 0;
      for (i=0;i<N;i++) {
         output[i] = exp(input[i]);
         sum += output[i];
      }
      sum = 1.f/(sum+1e-30);
      for (i=0;i<N;i++)
         output[i] = sum*output[i];
   } else {
      celt_assert(activation == ACTIVATION_LINEAR);
      for (i=0;i<N;i++)
         output[i] = input[i];
   }
}
void compute_dense(const DenseLayer *layer, float *output, const float *input)
{
   int i;
   int N, M;
   int stride;
   celt_assert(layer->nb_neurons <= MAX_NEURONS);
   M = layer->nb_inputs;
   N = layer->nb_neurons;
   stride = N;
   for (i=0;i<N;i++)
      output[i] = layer->bias[i];
   gemm_accum(output, layer->input_weights, N, M, stride, input);
   compute_activation(output, output, N, layer->activation);
}
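
/* The GRU bias, input_weights and recurrent_weights each hold the update (z),
   reset (r) and output (h) gates concatenated, hence the 3*N stride and the
   N and 2*N offsets below. */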
void compute_gru(const GRULayer *gru, float *state, const float *input)
{
   int i;
   int N, M;
   int stride;
   float tmp[MAX_NEURONS];
   float z[MAX_NEURONS];
   float r[MAX_NEURONS];
   float h[MAX_NEURONS];
   celt_assert(gru->nb_neurons <= MAX_NEURONS);
   M = gru->nb_inputs;
   N = gru->nb_neurons;
   stride = 3*N;
   /* Compute update gate. */
   for (i=0;i<N;i++)
      z[i] = gru->bias[i];
   gemm_accum(z, gru->input_weights, N, M, stride, input);
   gemm_accum(z, gru->recurrent_weights, N, N, stride, state);
   for (i=0;i<N;i++)
      z[i] = sigmoid_approx(z[i]);
   /* Compute reset gate. */
   for (i=0;i<N;i++)
      r[i] = gru->bias[N + i];
   gemm_accum(r, &gru->input_weights[N], N, M, stride, input);
   gemm_accum(r, &gru->recurrent_weights[N], N, N, stride, state);
   for (i=0;i<N;i++)
      r[i] = sigmoid_approx(r[i]);
   /* Compute output. */
   for (i=0;i<N;i++)
      h[i] = gru->bias[2*N + i];
   if (gru->reset_after)
   {
      /* WARNING: The reset_after version was never tested. */
      RNN_CLEAR(tmp, N);
      gemm_accum(tmp, &gru->recurrent_weights[2*N], N, N, stride, state);
      for (i=0;i<N;i++)
         h[i] += tmp[i] * r[i];
      gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input);
   } else {
      for (i=0;i<N;i++)
         tmp[i] = state[i] * r[i];
      gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input);
      gemm_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp);
   }
   for (i=0;i<N;i++)
      h[i] = z[i]*state[i] + (1-z[i])*tansig_approx(h[i]);
   for (i=0;i<N;i++)
      state[i] = h[i];
}
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
{
   int i;
   int N, M;
   int stride;
   float tmp[MAX_CONV_INPUTS];
   celt_assert(layer->nb_neurons <= MAX_NEURONS);
   celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
   /* Assemble the full receptive field in tmp: kernel_size-1 past frames
      from mem, followed by the current input frame. */
   RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
   RNN_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
   M = layer->nb_inputs*layer->kernel_size;
   N = layer->nb_neurons;
   stride = N;
   for (i=0;i<N;i++)
      output[i] = layer->bias[i];
   gemm_accum(output, layer->input_weights, N, M, stride, tmp);
   compute_activation(output, output, N, layer->activation);
   /* Shift the memory forward by one frame. */
   RNN_COPY(mem, &tmp[layer->nb_inputs], layer->nb_inputs*(layer->kernel_size-1));
}
void compute_embedding(const EmbeddingLayer *layer, float *output, int input)
{
   int i;
   for (i=0;i<layer->dim;i++)
   {
      output[i] = layer->embedding_weights[input*layer->dim + i];
   }
}
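
As a sanity check, the layers can be driven directly. A minimal sketch (the
2-input/2-neuron layer below is made up for illustration, not taken from the
model; weights are stored column by column, as gemm_accum expects):

#include <stdio.h>
#include "nnet.h"

static const float demo_weights[4] = {
   0.1f, 0.2f,  /* column for input 0 */
   0.3f, 0.4f   /* column for input 1 */
};
static const float demo_bias[2] = {0.0f, 0.1f};
static const DenseLayer demo_layer = {
   demo_bias, demo_weights, 2, 2, ACTIVATION_TANH
};

int main(void) {
   float in[2] = {1.f, -1.f};
   float out[2];
   compute_dense(&demo_layer, out, in);  /* out = tanh(W*in + b) */
   printf("%f %f\n", out[0], out[1]);
   return 0;
}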

dnn/nnet.h Normal file

@@ -0,0 +1,94 @@
/* Copyright (c) 2018 Mozilla
Copyright (c) 2017 Jean-Marc Valin */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NNET_H_
#define _NNET_H_
#define MAX_NEURONS 1024
#define MAX_CONV_INPUTS 1024
#define ACTIVATION_LINEAR 0
#define ACTIVATION_SIGMOID 1
#define ACTIVATION_TANH 2
#define ACTIVATION_RELU 3
#define ACTIVATION_SOFTMAX 4
typedef struct {
  const float *bias;
  const float *input_weights;
  int nb_inputs;
  int nb_neurons;
  int activation;
} DenseLayer;

typedef struct {
  const float *bias;
  const float *input_weights;
  const float *factor;
  int nb_inputs;
  int nb_neurons;
  int nb_channels;
  int activation;
} MDenseLayer;

typedef struct {
  const float *bias;
  const float *input_weights;
  const float *recurrent_weights;
  int nb_inputs;
  int nb_neurons;
  int activation;
  int reset_after;
} GRULayer;

typedef struct {
  const float *bias;
  const float *input_weights;
  int nb_inputs;
  int kernel_size;
  int nb_neurons;
  int activation;
} Conv1DLayer;

typedef struct {
  const float *embedding_weights;
  int nb_inputs;
  int dim;
} EmbeddingLayer;
void compute_activation(float *output, float *input, int N, int activation);
void compute_dense(const DenseLayer *layer, float *output, const float *input);
void compute_gru(const GRULayer *gru, float *state, const float *input);
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);
void compute_embedding(const EmbeddingLayer *layer, float *output, int input);
#endif /* _NNET_H_ */

dnn/tansig_table.h Normal file

@@ -0,0 +1,45 @@
/* This file is auto-generated by gen_tables */
static const float tansig_table[201] = {
0.000000f, 0.039979f, 0.079830f, 0.119427f, 0.158649f,
0.197375f, 0.235496f, 0.272905f, 0.309507f, 0.345214f,
0.379949f, 0.413644f, 0.446244f, 0.477700f, 0.507977f,
0.537050f, 0.564900f, 0.591519f, 0.616909f, 0.641077f,
0.664037f, 0.685809f, 0.706419f, 0.725897f, 0.744277f,
0.761594f, 0.777888f, 0.793199f, 0.807569f, 0.821040f,
0.833655f, 0.845456f, 0.856485f, 0.866784f, 0.876393f,
0.885352f, 0.893698f, 0.901468f, 0.908698f, 0.915420f,
0.921669f, 0.927473f, 0.932862f, 0.937863f, 0.942503f,
0.946806f, 0.950795f, 0.954492f, 0.957917f, 0.961090f,
0.964028f, 0.966747f, 0.969265f, 0.971594f, 0.973749f,
0.975743f, 0.977587f, 0.979293f, 0.980869f, 0.982327f,
0.983675f, 0.984921f, 0.986072f, 0.987136f, 0.988119f,
0.989027f, 0.989867f, 0.990642f, 0.991359f, 0.992020f,
0.992631f, 0.993196f, 0.993718f, 0.994199f, 0.994644f,
0.995055f, 0.995434f, 0.995784f, 0.996108f, 0.996407f,
0.996682f, 0.996937f, 0.997172f, 0.997389f, 0.997590f,
0.997775f, 0.997946f, 0.998104f, 0.998249f, 0.998384f,
0.998508f, 0.998623f, 0.998728f, 0.998826f, 0.998916f,
0.999000f, 0.999076f, 0.999147f, 0.999213f, 0.999273f,
0.999329f, 0.999381f, 0.999428f, 0.999472f, 0.999513f,
0.999550f, 0.999585f, 0.999617f, 0.999646f, 0.999673f,
0.999699f, 0.999722f, 0.999743f, 0.999763f, 0.999781f,
0.999798f, 0.999813f, 0.999828f, 0.999841f, 0.999853f,
0.999865f, 0.999875f, 0.999885f, 0.999893f, 0.999902f,
0.999909f, 0.999916f, 0.999923f, 0.999929f, 0.999934f,
0.999939f, 0.999944f, 0.999948f, 0.999952f, 0.999956f,
0.999959f, 0.999962f, 0.999965f, 0.999968f, 0.999970f,
0.999973f, 0.999975f, 0.999977f, 0.999978f, 0.999980f,
0.999982f, 0.999983f, 0.999984f, 0.999986f, 0.999987f,
0.999988f, 0.999989f, 0.999990f, 0.999990f, 0.999991f,
0.999992f, 0.999992f, 0.999993f, 0.999994f, 0.999994f,
0.999994f, 0.999995f, 0.999995f, 0.999996f, 0.999996f,
0.999996f, 0.999997f, 0.999997f, 0.999997f, 0.999997f,
0.999997f, 0.999998f, 0.999998f, 0.999998f, 0.999998f,
0.999998f, 0.999998f, 0.999999f, 0.999999f, 0.999999f,
0.999999f, 0.999999f, 0.999999f, 0.999999f, 0.999999f,
0.999999f, 0.999999f, 0.999999f, 0.999999f, 0.999999f,
1.000000f, 1.000000f, 1.000000f, 1.000000f, 1.000000f,
1.000000f, 1.000000f, 1.000000f, 1.000000f, 1.000000f,
1.000000f,
};
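
The spacing matches tansig_approx() above, where the index is computed as
i = floor(.5+25*x): 201 samples of tanh at steps of 0.04 over [0, 8]. A
sketch of how such a table can be regenerated (the gen_tables program itself
is not part of this commit):

#include <math.h>
#include <stdio.h>

int main(void) {
   printf("static const float tansig_table[201] = {\n");
   for (int i = 0; i < 201; i++)
      printf("%ff,%s", tanh(0.04*i), (i%5 == 4) ? "\n" : " ");
   printf("};\n");
   return 0;
}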