From caca188b5a0275b1c04baf8fcc9798b900692a2c Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin
Date: Thu, 21 Dec 2023 23:05:40 -0500
Subject: [PATCH] Make loss simulator standalone

---
 dnn/lossgen.c        | 61 ++++++++++++++++++++++++++++++++++++++++----
 dnn/lpcnet_private.h |  1 -
 dnn/nnet.h           |  1 +
 3 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/dnn/lossgen.c b/dnn/lossgen.c
index 15505873..fa1ee71d 100644
--- a/dnn/lossgen.c
+++ b/dnn/lossgen.c
@@ -6,7 +6,53 @@
 #include "lossgen.h"
 #include "os_support.h"
 #include "nnet.h"
-#include "lpcnet_private.h"
+
+/* Disable RTCD for this. */
+#define RTCD_ARCH c
+
+#include "nnet_arch.h"
+
+#define MAX_RNN_NEURONS_ALL IMAX(LOSSGEN_GRU1_STATE_SIZE, LOSSGEN_GRU2_STATE_SIZE)
+
+/* These two functions are copied from nnet.c to make sure we don't have linking issues. */
+void compute_generic_gru_lossgen(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch)
+{
+  int i;
+  int N;
+  float zrh[3*MAX_RNN_NEURONS_ALL];
+  float recur[3*MAX_RNN_NEURONS_ALL];
+  float *z;
+  float *r;
+  float *h;
+  celt_assert(3*recurrent_weights->nb_inputs == recurrent_weights->nb_outputs);
+  celt_assert(input_weights->nb_outputs == recurrent_weights->nb_outputs);
+  N = recurrent_weights->nb_inputs;
+  z = zrh;
+  r = &zrh[N];
+  h = &zrh[2*N];
+  celt_assert(recurrent_weights->nb_outputs <= 3*MAX_RNN_NEURONS_ALL);
+  celt_assert(in != state);
+  compute_linear(input_weights, zrh, in, arch);
+  compute_linear(recurrent_weights, recur, state, arch);
+  for (i=0;i<2*N;i++)
+    zrh[i] += recur[i];
+  compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID, arch);
+  for (i=0;i<N;i++)
+    h[i] += recur[2*N+i]*r[i];
+  compute_activation(h, h, N, ACTIVATION_TANH, arch);
+  for (i=0;i<N;i++)
+    h[i] = z[i]*state[i] + (1-z[i])*h[i];
+  for (i=0;i<N;i++)
+    state[i] = h[i];
+}
+
+void compute_generic_dense_lossgen(const LinearLayer *layer, float *output, const float *input, int activation, int arch)
+{
+  compute_linear(layer, output, input, arch);
+  compute_activation(output, output, layer->nb_outputs, activation, arch);
+}
+
 
 int sample_loss(
    LossGenState *st,
    float percent_loss,
@@ -21,10 +67,10 @@ int sample_loss(
   LossGen *model = &st->model;
   input[0] = st->last_loss;
   input[1] = percent_loss;
-  compute_generic_dense(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, arch);
-  compute_generic_gru(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, arch);
-  compute_generic_gru(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, arch);
-  compute_generic_dense(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, arch);
+  compute_generic_dense_lossgen(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, arch);
+  compute_generic_gru_lossgen(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, arch);
+  compute_generic_gru_lossgen(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, arch);
+  compute_generic_dense_lossgen(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, arch);
   loss = (float)rand()/RAND_MAX < out;
   st->last_loss = loss;
   return loss;
@@ -41,6 +87,7 @@ void lossgen_init(LossGenState *st)
   ret = 0;
 #endif
   celt_assert(ret == 0);
+  (void)ret;
 }
 
 int lossgen_load_model(LossGenState *st, const unsigned char *data, int len) {
@@ -59,6 +106,10 @@ int main(int argc, char **argv) {
   int i, N;
   float p;
   LossGenState st;
+  if (argc!=3) {
+    fprintf(stderr, "usage: lossgen <percentage> <length>\n");
+    return 1;
+  }
   lossgen_init(&st);
   p = atof(argv[1]);
   N = atoi(argv[2]);
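For reference, the gated update that compute_generic_gru_lossgen() performs above is the standard GRU recurrence: the first two thirds of zrh hold the update gate z and reset gate r (sigmoid), the last third holds the candidate state, whose recurrent contribution is scaled by r before the tanh, and the new state blends the old state with the candidate using z as the keep gate. A minimal per-element sketch of the same math, assuming zrh_in = W*x and recur = U*state have already been computed (helper names here are illustrative, not from the patch):

#include <math.h>

/* sigmoid(x) = (1 + tanh(x/2))/2 */
static float sigmoidf(float x) { return 0.5f + 0.5f*tanhf(0.5f*x); }

/* One GRU step over an N-element state; zrh_in and recur hold [z | r | h] blocks. */
static void gru_step_sketch(float *state, const float *zrh_in, const float *recur, int N)
{
   int i;
   for (i = 0; i < N; i++) {
      float z = sigmoidf(zrh_in[i] + recur[i]);         /* update ("keep") gate */
      float r = sigmoidf(zrh_in[N+i] + recur[N+i]);     /* reset gate */
      float h = tanhf(zrh_in[2*N+i] + r*recur[2*N+i]);  /* candidate state */
      state[i] = z*state[i] + (1.f - z)*h;              /* blend old state and candidate */
   }
}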
diff --git a/dnn/lpcnet_private.h b/dnn/lpcnet_private.h
index e1e3e9c6..7fb8123a 100644
--- a/dnn/lpcnet_private.h
+++ b/dnn/lpcnet_private.h
@@ -80,5 +80,4 @@ void lpcnet_synthesize_blend_impl(LPCNetState *lpcnet, const opus_int16 *pcm_in,
 void run_frame_network(LPCNetState *lpcnet, float *gru_a_condition, float *gru_b_condition, float *lpc, const float *features);
 
-int parse_weights(WeightArray **list, const unsigned char *data, int len);
 
 #endif
diff --git a/dnn/nnet.h b/dnn/nnet.h
index 7eb7a57b..425a8dfe 100644
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -138,6 +138,7 @@ void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *
 
 void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch);
 
+int parse_weights(WeightArray **list, const unsigned char *data, int len);
 
 extern const WeightArray lpcnet_arrays[];
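With the argc check added to main(), lossgen builds as a standalone tool that prints one 0/1 loss decision per line. The same model can also be driven programmatically through the two entry points the patch touches. A minimal sketch, assuming the three-argument sample_loss() signature implied by the diff context (state, target loss, arch, where arch can be 0 since the patch pins RTCD to the C kernels) and that the scale of the loss argument matches whatever the model was trained with:

#include <stdio.h>
#include "lossgen.h"

int main(void)
{
   int i;
   LossGenState st;
   lossgen_init(&st);   /* loads the built-in model; asserts on failure */
   for (i = 0; i < 100; i++) {
      /* sample_loss() returns 1 if the next packet is lost, 0 otherwise.
         Feeding the previous decision back through st->last_loss is what
         makes the simulated losses bursty rather than independent. */
      printf("%d\n", sample_loss(&st, 0.2f, 0));
   }
   return 0;
}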