mirror of
https://github.com/xiph/opus.git
synced 2025-05-16 08:28:29 +00:00
Simpler GRU implementation just for reset_after.
This commit is contained in:
parent
6c2f7e58fd
commit
040aa437c3
3 changed files with 42 additions and 2 deletions
|
@ -121,10 +121,10 @@ void run_sample_network(NNetState *net, float *pdf, const float *condition, int
|
||||||
compute_embedding(&embed_sig, &in_a[EMBED_SIG_OUT_SIZE], pred);
|
compute_embedding(&embed_sig, &in_a[EMBED_SIG_OUT_SIZE], pred);
|
||||||
compute_embedding(&embed_exc, &in_a[2*EMBED_SIG_OUT_SIZE], last_exc);
|
compute_embedding(&embed_exc, &in_a[2*EMBED_SIG_OUT_SIZE], last_exc);
|
||||||
RNN_COPY(&in_a[2*EMBED_SIG_OUT_SIZE + EMBED_EXC_OUT_SIZE], condition, FEATURE_DENSE2_OUT_SIZE);
|
RNN_COPY(&in_a[2*EMBED_SIG_OUT_SIZE + EMBED_EXC_OUT_SIZE], condition, FEATURE_DENSE2_OUT_SIZE);
|
||||||
compute_gru(&gru_a, net->gru_a_state, in_a);
|
compute_gru2(&gru_a, net->gru_a_state, in_a);
|
||||||
RNN_COPY(in_b, net->gru_a_state, GRU_A_STATE_SIZE);
|
RNN_COPY(in_b, net->gru_a_state, GRU_A_STATE_SIZE);
|
||||||
RNN_COPY(&in_b[GRU_A_STATE_SIZE], condition, FEATURE_DENSE2_OUT_SIZE);
|
RNN_COPY(&in_b[GRU_A_STATE_SIZE], condition, FEATURE_DENSE2_OUT_SIZE);
|
||||||
compute_gru(&gru_b, net->gru_b_state, in_b);
|
compute_gru2(&gru_b, net->gru_b_state, in_b);
|
||||||
compute_mdense(&dual_fc, pdf, net->gru_b_state);
|
compute_mdense(&dual_fc, pdf, net->gru_b_state);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
38
dnn/nnet.c
38
dnn/nnet.c
|
@ -218,6 +218,44 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
|
||||||
state[i] = h[i];
|
state[i] = h[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void compute_gru2(const GRULayer *gru, float *state, const float *input)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
int N, M;
|
||||||
|
int stride;
|
||||||
|
float zrh[3*MAX_RNN_NEURONS];
|
||||||
|
float recur[3*MAX_RNN_NEURONS];
|
||||||
|
float *z;
|
||||||
|
float *r;
|
||||||
|
float *h;
|
||||||
|
M = gru->nb_inputs;
|
||||||
|
N = gru->nb_neurons;
|
||||||
|
z = zrh;
|
||||||
|
r = &zrh[N];
|
||||||
|
h = &zrh[2*N];
|
||||||
|
celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS);
|
||||||
|
celt_assert(input != state);
|
||||||
|
celt_assert(gru->reset_after);
|
||||||
|
stride = 3*N;
|
||||||
|
/* Compute update gate. */
|
||||||
|
for (i=0;i<3*N;i++)
|
||||||
|
zrh[i] = gru->bias[i];
|
||||||
|
gemm_accum(zrh, gru->input_weights, 3*N, M, stride, input);
|
||||||
|
for (i=0;i<3*N;i++)
|
||||||
|
recur[i] = gru->bias[3*N + i];
|
||||||
|
gemm_accum(recur, gru->recurrent_weights, 3*N, N, stride, state);
|
||||||
|
for (i=0;i<2*N;i++)
|
||||||
|
zrh[i] += recur[i];
|
||||||
|
compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID);
|
||||||
|
for (i=0;i<N;i++)
|
||||||
|
h[i] += recur[2*N+i]*r[i];
|
||||||
|
compute_activation(h, h, N, gru->activation);
|
||||||
|
for (i=0;i<N;i++)
|
||||||
|
h[i] = z[i]*state[i] + (1-z[i])*h[i];
|
||||||
|
for (i=0;i<N;i++)
|
||||||
|
state[i] = h[i];
|
||||||
|
}
|
||||||
|
|
||||||
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
|
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
|
@ -85,6 +85,8 @@ void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
|
||||||
|
|
||||||
void compute_gru(const GRULayer *gru, float *state, const float *input);
|
void compute_gru(const GRULayer *gru, float *state, const float *input);
|
||||||
|
|
||||||
|
void compute_gru2(const GRULayer *gru, float *state, const float *input);
|
||||||
|
|
||||||
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);
|
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);
|
||||||
|
|
||||||
void compute_embedding(const EmbeddingLayer *layer, float *output, int input);
|
void compute_embedding(const EmbeddingLayer *layer, float *output, int input);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue