Code for building a model struct

This commit is contained in:
Jean-Marc Valin 2023-05-16 23:15:49 -04:00
parent cc714cc5b0
commit 71c8a23fc1
4 changed files with 200 additions and 4 deletions

@@ -145,4 +145,57 @@ void compute_gru_a_input(float *output, const float *input, int N, const Embeddi
int sample_from_pdf(const float *pdf, int N, float exp_boost, float pdf_floor);
int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
const char *factor,
int nb_inputs,
int nb_neurons,
int nb_channels,
int activation);
int dense_init(DenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int nb_neurons,
int activation);
int gru_init(GRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *input_weights,
const char *input_weights_idx,
const char *recurrent_weights,
int nb_inputs,
int nb_neurons,
int activation,
int reset_after);
int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *diag_weights,
const char *recurrent_weights,
const char *idx,
int nb_neurons,
int activation,
int reset_after);
int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int kernel_size,
int nb_neurons,
int activation);
int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
const char *embedding_weights,
int nb_inputs,
int dim);
#endif /* _MLP_H_ */
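
Together these declarations form the run-time loading API this commit is building toward: each layer type gets an init function that binds a layer struct to named arrays from a parsed weight blob and reports failure with a non-zero return. A minimal usage sketch follows; the header name is inferred from the _MLP_H_ guard, and the array names, sizes and activation are illustrative, not taken from the real model:

#include "mlp.h"  /* assumed file name, based on the _MLP_H_ guard above */

/* Hypothetical loader for a single dense layer. */
int load_dense(DenseLayer *dense, const unsigned char *blob, int blob_len)
{
    WeightArray *arrays;
    /* Build the name -> data table from a serialized weight blob;
       assuming a negative return signals a malformed blob. */
    if (parse_weights(&arrays, blob, blob_len) < 0) return 1;
    /* dense_init() looks up both arrays by name and returns
       non-zero if either one is missing. */
    return dense_init(dense, arrays, "my_dense_bias", "my_dense_weights",
                      128, 16, ACTIVATION_TANH);
}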

@@ -71,6 +71,120 @@ int parse_weights(WeightArray **list, const unsigned char *data, int len)
return nb_arrays;
}
static const void *find_array(const WeightArray *arrays, const char *name) {
while (arrays->name && strcmp(arrays->name, name) != 0) arrays++;
return arrays->data;
}
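
find_array() relies on the table ending with a sentinel entry whose name is NULL; the generator below emits exactly that ({NULL, 0, 0}). Because the sentinel's data pointer is also zero-initialized, a failed lookup falls through to the sentinel and returns NULL, which is the single check every init function makes. A sketch of such a table, with illustrative names and a placeholder type id:

static const float dense_bias[16] = {0};
static const float dense_weights[128*16] = {0};

static const WeightArray example_arrays[] = {
  {"dense_bias", 0, sizeof(dense_bias), dense_bias},
  {"dense_weights", 0, sizeof(dense_weights), dense_weights},
  {NULL, 0, 0}  /* sentinel: name and (implicitly) data are NULL */
};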
int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
const char *factor,
int nb_inputs,
int nb_neurons,
int nb_channels,
int activation)
{
if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
if ((layer->factor = find_array(arrays, factor)) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->nb_channels = nb_channels;
layer->activation = activation;
return 0;
}
int dense_init(DenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int nb_neurons,
int activation)
{
if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
return 0;
}
int gru_init(GRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *input_weights,
const char *input_weights_idx,
const char *recurrent_weights,
int nb_inputs,
int nb_neurons,
int activation,
int reset_after)
{
if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
if ((layer->input_weights_idx = find_array(arrays, input_weights_idx)) == NULL) return 1;
if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
layer->reset_after = reset_after;
return 0;
}
int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *diag_weights,
const char *recurrent_weights,
const char *idx,
int nb_neurons,
int activation,
int reset_after)
{
if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
if ((layer->diag_weights = find_array(arrays, diag_weights)) == NULL) return 1;
if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
if ((layer->idx = find_array(arrays, idx)) == NULL) return 1;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
layer->reset_after = reset_after;
return 0;
}
int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int kernel_size,
int nb_neurons,
int activation)
{
if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->kernel_size = kernel_size;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
return 0;
}
int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
const char *embedding_weights,
int nb_inputs,
int dim)
{
if ((layer->embedding_weights = find_array(arrays, embedding_weights)) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->dim = dim;
return 0;
}
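
The Python changes below buffer, for every dumped layer, one struct member and one guarded call to these init functions. For a model with one GRU and one dense layer, the generated init function would look roughly like this; layer names and parameters are illustrative, while the shape of each line comes from the format strings in the script:

int init_lpcnet_model(LPCNetModel *model, const WeightArray *arrays) {
  /* Each call aborts with 1 as soon as a named array is missing. */
  if (gru_init(&model->gru_a, arrays, "gru_a_bias", "gru_a_subias", "gru_a_weights", "gru_a_weights_idx", "gru_a_recurrent_weights", 384, 384, ACTIVATION_TANH, 1)) return 1;
  if (dense_init(&model->md, arrays, "md_bias", "md_weights", 16, 16, ACTIVATION_SOFTMAX)) return 1;
  return 0;
}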
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
@@ -102,3 +216,4 @@ int main()
close(fd);
return 0;
}
#endif

@@ -26,6 +26,7 @@
'''
import os
import io
import lpcnet
import sys
import numpy as np
@@ -39,7 +40,6 @@ import h5py
import re
import argparse
-array_list = []
# no cuda devices needed
os.environ['CUDA_VISIBLE_DEVICES'] = ""
@@ -148,6 +148,9 @@ def dump_sparse_gru(self, f, hf):
hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
hf.write('extern const SparseGRULayer {};\n\n'.format(name));
model_struct.write(' SparseGRULayer {};\n'.format(name));
model_init.write(' if (sparse_gru_init(&model->{}, arrays, "{}_bias", "{}_subias", "{}_recurrent_weights_diag", "{}_recurrent_weights", "{}_recurrent_weights_idx", {}, ACTIVATION_{}, {})) return 1;\n'
.format(name, name, name, name, name, name, weights[0].shape[1]//3, activation, reset_after))
return True
def dump_grub(self, f, hf, gru_a_size):
@@ -182,6 +185,9 @@ def dump_grub(self, f, hf, gru_a_size):
f.write('const GRULayer {} = {{\n {}_bias,\n {}_subias,\n {}_weights,\n {}_weights_idx,\n {}_recurrent_weights,\n {}, {}, ACTIVATION_{}, {}\n}};\n\n'
.format(name, name, name, name, name, name, gru_a_size, weights[0].shape[1]//3, activation, reset_after))
hf.write('extern const GRULayer {};\n\n'.format(name));
model_struct.write(' GRULayer {};\n'.format(name));
model_init.write(' if (gru_init(&model->{}, arrays, "{}_bias", "{}_subias", "{}_weights", "{}_weights_idx", "{}_recurrent_weights", {}, {}, ACTIVATION_{}, {})) return 1;\n'
.format(name, name, name, name, name, name, gru_a_size, weights[0].shape[1]//3, activation, reset_after))
return True
def dump_gru_layer_dummy(self, f, hf):
@@ -200,6 +206,9 @@ def dump_dense_layer_impl(name, weights, bias, activation, f, hf):
.format(name, name, name, weights.shape[0], weights.shape[1], activation))
hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
hf.write('extern const DenseLayer {};\n\n'.format(name));
model_struct.write(' DenseLayer {};\n'.format(name));
model_init.write(' if (dense_init(&model->{}, arrays, "{}_bias", "{}_weights", {}, {}, ACTIVATION_{})) return 1;\n'
.format(name, name, name, weights.shape[0], weights.shape[1], activation))
def dump_dense_layer(self, f, hf):
name = self.name
@@ -225,6 +234,9 @@ def dump_mdense_layer(self, f, hf):
.format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
hf.write('extern const MDenseLayer {};\n\n'.format(name));
model_struct.write(' MDenseLayer {};\n'.format(name));
model_init.write(' if (mdense_init(&model->{}, arrays, "{}_bias", "{}_weights", "{}_factor", {}, {}, {}, ACTIVATION_{})) return 1;\n'
.format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
return False
MDense.dump_layer = dump_mdense_layer
@@ -243,6 +255,9 @@ def dump_conv1d_layer(self, f, hf):
hf.write('#define {}_STATE_SIZE ({}*{})\n'.format(name.upper(), weights[0].shape[1], (weights[0].shape[0]-1)))
hf.write('#define {}_DELAY {}\n'.format(name.upper(), (weights[0].shape[0]-1)//2))
hf.write('extern const Conv1DLayer {};\n\n'.format(name));
model_struct.write(' Conv1DLayer {};\n'.format(name));
model_init.write(' if (conv1d_init(&model->{}, arrays, "{}_bias", "{}_weights", {}, {}, {}, ACTIVATION_{})) return 1;\n'
.format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
return True
Conv1D.dump_layer = dump_conv1d_layer
@@ -253,6 +268,9 @@ def dump_embedding_layer_impl(name, weights, f, hf):
.format(name, name, weights.shape[0], weights.shape[1]))
hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
hf.write('extern const EmbeddingLayer {};\n\n'.format(name));
model_struct.write(' EmbeddingLayer {};\n'.format(name));
model_init.write(' if (embedding_init(&model->{}, arrays, "{}_weights", {}, {})) return 1;\n'
.format(name, name, weights.shape[0], weights.shape[1]))
def dump_embedding_layer(self, f, hf):
name = self.name
@@ -291,6 +309,11 @@ if __name__ == "__main__":
f = open(cfile, 'w')
hf = open(hfile, 'w')
model_struct = io.StringIO()
model_init = io.StringIO()
model_struct.write('typedef struct {\n')
model_init.write('int init_lpcnet_model(LPCNetModel *model, const WeightArray *arrays) {\n')
array_list = []
f.write('/*This file is automatically generated from a Keras model*/\n')
f.write('/*based on model {}*/\n\n'.format(sys.argv[1]))
@@ -359,7 +382,10 @@ if __name__ == "__main__":
f.write(' {{"{}", WEIGHTS_{}_TYPE, sizeof({}), {}}},\n'.format(name, name, name, name))
f.write('#endif\n')
f.write(' {NULL, 0, 0}\n};\n')
-f.write('#endif\n')
+f.write('#endif\n\n')
model_init.write(' return 0;\n}\n')
f.write(model_init.getvalue())
hf.write('#define MAX_RNN_NEURONS {}\n\n'.format(max_rnn_neurons))
hf.write('#define MAX_CONV_INPUTS {}\n\n'.format(max_conv_inputs))
@@ -369,8 +395,10 @@ if __name__ == "__main__":
hf.write('typedef struct {\n')
for i, name in enumerate(layer_list):
hf.write(' float {}_state[{}_STATE_SIZE];\n'.format(name, name.upper()))
-hf.write('} NNetState;\n')
+hf.write('} NNetState;\n\n')
model_struct.write('} LPCNetModel;\n\n')
hf.write(model_struct.getvalue())
hf.write('\n\n#endif\n')
f.close()
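
With the buffers flushed this way, the generated C file ends with the weight table followed by init_lpcnet_model(), and the generated header ends with two typedefs: the pre-existing per-layer state struct and the new model struct appended from model_struct. Roughly, with illustrative member names:

typedef struct {
  float gru_a_state[GRU_A_STATE_SIZE];
  float feature_conv1_state[FEATURE_CONV1_STATE_SIZE];
} NNetState;

typedef struct {
  GRULayer gru_a;
  DenseLayer md;
} LPCNetModel;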

@@ -39,7 +39,7 @@ void write_weights(const WeightArray *list, FILE *fout)
unsigned char zeros[WEIGHT_BLOCK_SIZE] = {0};
while (list[i].name != NULL) {
WeightHead h;
-strcpy(h.head, "DNNw");
+memcpy(h.head, "DNNw", 4);
h.version = WEIGHT_BLOB_VERSION;
h.type = list[i].type;
h.size = list[i].size;
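
This last hunk fixes a subtle overflow: strcpy() also copies the terminating '\0', so writing the 4-character magic "DNNw" stores 5 bytes into h.head. Assuming the field is declared as a 4-byte array, as sketched below, that extra byte spills into the next struct member; memcpy(h.head, "DNNw", 4) copies exactly the 4 magic bytes and leaves the field un-terminated by design.

/* Assumed shape of WeightHead; only the fields used above are shown. */
typedef struct {
  char head[4];  /* 4-byte magic "DNNw", deliberately not NUL-terminated */
  int version;
  int type;
  int size;
} WeightHead;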