mirror of https://github.com/xiph/opus.git
Implement MDense
parent d4046036a9
commit 66486004ba
2 changed files with 23 additions and 4 deletions
@@ -80,8 +80,8 @@ def dump_mdense_layer(self, f, hf):
     printVector(f, weights[1], name + '_bias')
     printVector(f, weights[1], name + '_factor')
     activation = self.activation.__name__.upper()
-    f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, ACTIVATION_{}\n}};\n\n'
-            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
+    f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
+            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
     return False
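For illustration, the updated f.write() call now emits an initializer of the following shape; the layer name md_1, the numeric sizes and the activation are made-up placeholders, only the layout follows from the format string. The visible change is the extra third size field, taken from weights[0].shape[2]:

const MDenseLayer md_1 = {
   md_1_bias,
   md_1_weights,
   md_1_factor,
   16, 128, 2, ACTIVATION_SIGMOID
};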
dnn/nnet.c (23 lines changed)
@@ -118,7 +118,6 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
    int i;
    int N, M;
    int stride;
-   celt_assert(layer->nb_neurons <= MAX_NEURONS);
    M = layer->nb_inputs;
    N = layer->nb_neurons;
    stride = N;
@@ -130,6 +129,27 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)

 void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
 {
+   int i, c;
+   int N, M, C;
+   int stride;
+   M = layer->nb_inputs;
+   N = layer->nb_neurons;
+   C = layer->nb_channels;
+   /* FIXME: Make this C90. */
+   float tmp[N*C];
+   stride = N*C;
+   for (i=0;i<N*C;i++)
+      tmp[i] = layer->bias[i];
+   gemm_accum(tmp, layer->input_weights, N*C, M, stride, input);
+   compute_activation(tmp, tmp, N*C, ACTIVATION_TANH);
+   for (i=0;i<N;i++)
+      output[i] = 0;
+   for (c=0;c<C;c++)
+   {
+      for (i=0;i<N;i++)
+         output[i] += tmp[c*N + i]*layer->factor[c*N + i];
+   }
+   compute_activation(output, output, N, layer->activation);
 }

 void compute_gru(const GRULayer *gru, float *state, const float *input)
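The new compute_mdense() first evaluates N*C tanh outputs (one dense projection per channel), then collapses the C channels with the per-output 'factor' gains before applying the layer's own activation. The FIXME flags the C99 variable-length array float tmp[N*C]. One possible C90-friendly rework, sketched here purely as an illustration, bounds the temporary the way the file already bounds others with MAX_NEURONS and MAX_CONV_INPUTS; MAX_MDENSE_TMP is a hypothetical constant, not part of this commit:

/* Sketch of a C90 variant of compute_mdense(): same logic as above,
 * but with a fixed-size temporary instead of a VLA. */
#define MAX_MDENSE_TMP 1024   /* hypothetical bound, analogous to MAX_NEURONS */

void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
{
   int i, c;
   int N, M, C;
   int stride;
   float tmp[MAX_MDENSE_TMP];           /* fixed-size buffer instead of float tmp[N*C] */
   M = layer->nb_inputs;
   N = layer->nb_neurons;
   C = layer->nb_channels;
   celt_assert(N*C <= MAX_MDENSE_TMP);  /* keep the temporary within bounds */
   stride = N*C;
   for (i=0;i<N*C;i++)
      tmp[i] = layer->bias[i];
   gemm_accum(tmp, layer->input_weights, N*C, M, stride, input);
   compute_activation(tmp, tmp, N*C, ACTIVATION_TANH);
   for (i=0;i<N;i++)
      output[i] = 0;
   for (c=0;c<C;c++)
   {
      for (i=0;i<N;i++)
         output[i] += tmp[c*N + i]*layer->factor[c*N + i];
   }
   compute_activation(output, output, N, layer->activation);
}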
@@ -189,7 +209,6 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input)
    int N, M;
    int stride;
    float tmp[MAX_CONV_INPUTS];
-   celt_assert(layer->nb_neurons <= MAX_NEURONS);
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
    M = layer->nb_inputs;
    N = layer->nb_neurons;
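The MDenseLayer type itself is not part of this diff (it presumably lives in nnet.h), but the initializer emitted by the Python script and the field accesses in compute_mdense() together suggest a descriptor along these lines; the field names come from the code above, while the exact declaration and field order are an assumption:

typedef struct {
   const float *bias;           /* N*C bias terms, copied into tmp[] */
   const float *input_weights;  /* consumed by gemm_accum() */
   const float *factor;         /* per-channel, per-output mixing gains */
   int nb_inputs;               /* M */
   int nb_neurons;              /* N */
   int nb_channels;             /* C */
   int activation;              /* ACTIVATION_* applied to the combined output */
} MDenseLayer;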