mirror of https://github.com/xiph/opus.git
Add validation for weights blob
commit c7b6935bf2
parent 0098fe70ac
4 changed files with 62 additions and 25 deletions
@@ -6,7 +6,7 @@ srcdir=`dirname $0`
 test -n "$srcdir" && cd "$srcdir"
 
 #SHA1 of the first commit compatible with the current model
-commit=301a9fb
+commit=f1071fa
 ./download_model.sh $commit
 
 echo "Updating build configuration files for lpcnet, please wait...."
@@ -174,6 +174,7 @@ LPCNET_EXPORT int lpcnet_get_size()
 LPCNET_EXPORT int lpcnet_init(LPCNetState *lpcnet)
 {
   int i;
+  int ret;
   const char* rng_string="LPCNet";
   memset(lpcnet, 0, lpcnet_get_size());
   lpcnet->last_exc = lin2ulaw(0.f);
@@ -182,8 +183,9 @@ LPCNET_EXPORT int lpcnet_init(LPCNetState *lpcnet)
     lpcnet->sampling_logit_table[i] = -log((1-prob)/prob);
   }
   kiss99_srand(&lpcnet->rng, (const unsigned char *)rng_string, strlen(rng_string));
-  init_lpcnet_model(&lpcnet->model, lpcnet_arrays);
-  return 0;
+  ret = init_lpcnet_model(&lpcnet->model, lpcnet_arrays);
+  celt_assert(ret == 0);
+  return ret;
 }
 
 
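With this change a bad weights blob surfaces as a non-zero return from lpcnet_init() (and trips celt_assert() in debug builds) instead of leaving the decoder state half-initialized. A minimal caller sketch, assuming only the lpcnet_get_size()/lpcnet_init() API visible above; the error-handling policy here is illustrative, not the library's:

#include <stdlib.h>
#include "lpcnet.h"

/* Illustrative only: allocate a decoder state and refuse to hand it out
 * if the built-in weight arrays fail the new size/index validation. */
static LPCNetState *create_checked_lpcnet(void)
{
  LPCNetState *st = malloc(lpcnet_get_size());
  if (st == NULL) return NULL;
  if (lpcnet_init(st) != 0) {  /* non-zero now means the weights failed validation */
    free(st);
    return NULL;
  }
  return st;
}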
@@ -30,6 +30,8 @@
 
 #include "nnet.h"
 
+#define SPARSE_BLOCK_SIZE 32
+
 extern const WeightArray lpcnet_arrays[];
 
 int parse_record(const unsigned char **data, int *len, WeightArray *array) {
@@ -71,9 +73,40 @@ int parse_weights(WeightArray **list, const unsigned char *data, int len)
   return nb_arrays;
 }
 
-static const void *find_array(const WeightArray *arrays, const char *name) {
+static const void *find_array_entry(const WeightArray *arrays, const char *name) {
   while (arrays->name && strcmp(arrays->name, name) != 0) arrays++;
-  return arrays->data;
+  return arrays;
 }
+
+static const void *find_array_check(const WeightArray *arrays, const char *name, int size) {
+  const WeightArray *a = find_array_entry(arrays, name);
+  if (a && a->size == size) return a->data;
+  else return NULL;
+}
+
+static const void *find_idx_check(const WeightArray *arrays, const char *name, int nb_in, int nb_out, int *total_blocks) {
+  int remain;
+  const int *idx;
+  const WeightArray *a = find_array_entry(arrays, name);
+  *total_blocks = 0;
+  if (a == NULL) return NULL;
+  idx = a->data;
+  remain = a->size/sizeof(int);
+  while (remain > 0) {
+    int nb_blocks;
+    int i;
+    nb_blocks = *idx++;
+    if (remain < nb_blocks+1) return NULL;
+    for (i=0;i<nb_blocks;i++) {
+      int pos = *idx++;
+      if (pos+3 >= nb_in || (pos&0x3)) return NULL;
+    }
+    nb_out -= 8;
+    remain -= nb_blocks+1;
+    *total_blocks += nb_blocks;
+  }
+  if (nb_out != 0) return NULL;
+  return a->data;
+}
+
 int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
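The helpers above turn a missing, renamed, or wrongly sized weight array into a NULL return instead of a pointer into arbitrary data. A self-contained toy sketch of the same size-check idea; the struct below is a stand-in with just the fields the checks rely on (name, size, data), not the real WeightArray definition:

#include <stdio.h>
#include <string.h>

/* Stand-in for the real WeightArray: only the fields used by the checks above. */
typedef struct {
  const char *name;
  int size;            /* size of the blob in bytes */
  const void *data;
} ToyArray;

static const void *toy_find_check(const ToyArray *arrays, const char *name, int size) {
  while (arrays->name && strcmp(arrays->name, name) != 0) arrays++;
  if (arrays->name && arrays->size == size) return arrays->data;
  return NULL;         /* unknown name or unexpected size -> reject */
}

int main(void) {
  static const float bias[4] = {0};
  const ToyArray arrays[] = {
    {"layer_bias", sizeof(bias), bias},
    {NULL, 0, NULL}    /* sentinel terminating the list */
  };
  /* Correct size is accepted; a wrong size (e.g. 3 floats) is rejected. */
  printf("%d %d\n",
         toy_find_check(arrays, "layer_bias", 4*(int)sizeof(float)) != NULL,
         toy_find_check(arrays, "layer_bias", 3*(int)sizeof(float)) != NULL);
  return 0;
}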
@@ -85,9 +118,9 @@ int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
       int nb_channels,
       int activation)
 {
-  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
-  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
-  if ((layer->factor = find_array(arrays, factor)) == NULL) return 1;
+  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*nb_channels*sizeof(layer->bias[0]))) == NULL) return 1;
+  if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_channels*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
+  if ((layer->factor = find_array_check(arrays, factor, nb_channels*nb_neurons*sizeof(layer->factor[0]))) == NULL) return 1;
   layer->nb_inputs = nb_inputs;
   layer->nb_neurons = nb_neurons;
   layer->nb_channels = nb_channels;
@@ -102,8 +135,8 @@ int dense_init(DenseLayer *layer, const WeightArray *arrays,
       int nb_neurons,
       int activation)
 {
-  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
-  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+  if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
   layer->nb_inputs = nb_inputs;
   layer->nb_neurons = nb_neurons;
   layer->activation = activation;
@@ -121,11 +154,12 @@ int gru_init(GRULayer *layer, const WeightArray *arrays,
       int activation,
       int reset_after)
 {
-  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
-  if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
-  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
-  if ((layer->input_weights_idx = find_array(arrays, input_weights_idx)) == NULL) return 1;
-  if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
+  int total_blocks;
+  if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+  if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
+  if ((layer->input_weights_idx = find_idx_check(arrays, input_weights_idx, nb_inputs, 3*nb_neurons, &total_blocks)) == NULL) return 1;
+  if ((layer->input_weights = find_array_check(arrays, input_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->input_weights[0]))) == NULL) return 1;
+  if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, 3*nb_neurons*nb_neurons*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
   layer->nb_inputs = nb_inputs;
   layer->nb_neurons = nb_neurons;
   layer->activation = activation;
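Note that gru_init() now validates the sparse index before the weights, because the block count reported by find_idx_check() determines how large the weight blob must be (SPARSE_BLOCK_SIZE entries per block). The index format implied by the checks is a flat int array: one group per 8 outputs, each group starting with a block count followed by that many input-column offsets, every offset a multiple of 4 with pos+3 < nb_in so a 4-wide block fits, which is consistent with 8x4 weight blocks. A toy sketch under those assumptions (dimensions and values are made up):

#include <stdio.h>

/* Illustrative encoding of the index layout that find_idx_check() accepts,
 * for a hypothetical 16-input, 16-output sparse matrix:
 * one group per 8 outputs, each group = [nb_blocks, pos_0, ...], every pos
 * a multiple of 4 with pos+3 < nb_in. */
int main(void) {
  const int idx[] = {
    2, 0, 8,     /* outputs 0..7:  keep columns 0..3 and 8..11  */
    2, 4, 12     /* outputs 8..15: keep columns 4..7 and 12..15 */
  };
  int total_blocks = 2 + 2;
  /* gru_init() would then require the weight blob to hold exactly
   * SPARSE_BLOCK_SIZE*total_blocks = 32*4 entries. */
  printf("index ints: %d, expected weight entries: %d\n",
         (int)(sizeof(idx)/sizeof(idx[0])), 32*total_blocks);
  return 0;
}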
@@ -143,11 +177,12 @@ int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
       int activation,
       int reset_after)
 {
-  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
-  if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
-  if ((layer->diag_weights = find_array(arrays, diag_weights)) == NULL) return 1;
-  if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
-  if ((layer->idx = find_array(arrays, idx)) == NULL) return 1;
+  int total_blocks;
+  if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+  if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
+  if ((layer->diag_weights = find_array_check(arrays, diag_weights, 3*nb_neurons*sizeof(layer->diag_weights[0]))) == NULL) return 1;
+  if ((layer->idx = find_idx_check(arrays, idx, nb_neurons, 3*nb_neurons, &total_blocks)) == NULL) return 1;
+  if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
   layer->nb_neurons = nb_neurons;
   layer->activation = activation;
   layer->reset_after = reset_after;
@@ -162,8 +197,8 @@ int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
       int nb_neurons,
       int activation)
 {
-  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
-  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+  if ((layer->input_weights = find_array_check(arrays, input_weights, kernel_size*nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
   layer->nb_inputs = nb_inputs;
   layer->kernel_size = kernel_size;
   layer->nb_neurons = nb_neurons;
@@ -176,7 +211,7 @@ int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
       int nb_inputs,
       int dim)
 {
-  if ((layer->embedding_weights = find_array(arrays, embedding_weights)) == NULL) return 1;
+  if ((layer->embedding_weights = find_array_check(arrays, embedding_weights, nb_inputs*dim*sizeof(layer->embedding_weights[0]))) == NULL) return 1;
   layer->nb_inputs = nb_inputs;
   layer->dim = dim;
   return 0;
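Each *_init() above now returns 1 on the first missing or mis-sized array. init_lpcnet_model(), which lpcnet_init() asserts on, is generated code and not part of this diff; the sketch below only illustrates the propagation pattern those return codes imply. The model struct, layer names, and sizes are invented, and the *_init() prototypes are assumed from the calls and checks visible above:

#include "nnet.h"

/* Hypothetical two-layer model; the real LPCNet model struct is generated. */
typedef struct {
  DenseLayer cond_dense;
  EmbeddingLayer pitch_embedding;
} ToyModel;

/* Stop at the first layer whose arrays are missing or have the wrong size,
 * so the caller (cf. lpcnet_init() above) sees a non-zero return and
 * celt_assert() fires in debug builds. */
static int toy_model_init(ToyModel *model, const WeightArray *arrays)
{
  if (dense_init(&model->cond_dense, arrays,
                 "cond_dense_bias", "cond_dense_weights",
                 128, 128, ACTIVATION_TANH)) return 1;
  if (embedding_init(&model->pitch_embedding, arrays,
                     "pitch_embedding_weights", 256, 64)) return 1;
  return 0;
}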
@@ -340,13 +340,13 @@ if __name__ == "__main__":
     W = model.get_layer('gru_a').get_weights()[0][3*embed_size:,:]
     #FIXME: dump only half the biases
     b = model.get_layer('gru_a').get_weights()[2]
-    dump_dense_layer_impl('gru_a_dense_feature', W, b, 'LINEAR', f, hf)
+    dump_dense_layer_impl('gru_a_dense_feature', W, b[:len(b)//2], 'LINEAR', f, hf)
 
     W = model.get_layer('gru_b').get_weights()[0][model.rnn_units1:,:]
     b = model.get_layer('gru_b').get_weights()[2]
     # Set biases to zero because they'll be included in the GRU input part
     # (we need regular and SU biases)
-    dump_dense_layer_impl('gru_b_dense_feature', W, 0*b, 'LINEAR', f, hf)
+    dump_dense_layer_impl('gru_b_dense_feature', W, 0*b[:len(b)//2], 'LINEAR', f, hf)
     dump_grub(model.get_layer('gru_b'), f, hf, model.rnn_units1)
 
     layer_list = []