Mirror of https://github.com/xiph/opus.git
Dump Conv1D (didn't check weight ordering at all)
parent 477d08734d
commit d4046036a9
3 changed files with 27 additions and 13 deletions
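
The parenthetical in the commit title flags that the Keras Conv1D kernel layout was never checked against the C side. In Keras 2, Conv1D stores its kernel as (kernel_size, input_channels, filters), which is what the shape indexing in dump_conv1d_layer below assumes. A minimal sketch of how one might verify that ordering (not part of this commit; assumes the Keras 2 API):

    from keras.layers import Input, Conv1D
    from keras.models import Model

    # Tiny throwaway model: 8 input channels, 16 filters, kernel size 3.
    inp = Input(shape=(None, 8))
    out = Conv1D(16, 3, activation='tanh')(inp)
    model = Model(inp, out)

    w = model.layers[-1].get_weights()
    print(w[0].shape)   # expected (3, 8, 16) = (kernel_size, channels, filters)
    print(w[-1].shape)  # bias: (16,)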
@@ -64,10 +64,7 @@ def dump_dense_layer(self, f, hf):
     weights = self.get_weights()
     printVector(f, weights[0], name + '_weights')
     printVector(f, weights[-1], name + '_bias')
-    if hasattr(self, 'activation'):
-        activation = self.activation.__name__.upper()
-    else:
-        activation = 'TANH'
+    activation = self.activation.__name__.upper()
     f.write('const DenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, ACTIVATION_{}\n}};\n\n'
             .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
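
This hunk (and the matching one in dump_mdense_layer below) drops the hasattr fallback, so every dumped layer must now define activation explicitly. For reference, the DenseLayer format string above expands to a C initializer like the following; the layer name and sizes here are hypothetical, not taken from the commit:

    # Rendering the format string with made-up values
    # ('dense_1', 16 inputs, 24 outputs, tanh activation):
    print('const DenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, ACTIVATION_{}\n}};\n\n'
          .format('dense_1', 'dense_1', 'dense_1', 16, 24, 'TANH'))
    # const DenseLayer dense_1 = {
    #  dense_1_bias,
    #  dense_1_weights,
    #  16, 24, ACTIVATION_TANH
    # };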
@@ -82,10 +79,7 @@ def dump_mdense_layer(self, f, hf):
     printVector(f, weights[0], name + '_weights')
     printVector(f, weights[1], name + '_bias')
     printVector(f, weights[1], name + '_factor')
-    if hasattr(self, 'activation'):
-        activation = self.activation.__name__.upper()
-    else:
-        activation = 'TANH'
+    activation = self.activation.__name__.upper()
     f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, ACTIVATION_{}\n}};\n\n'
             .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
@@ -93,6 +87,21 @@ def dump_mdense_layer(self, f, hf):
     return False
 MDense.dump_layer = dump_mdense_layer
 
+
+def dump_conv1d_layer(self, f, hf):
+    name = self.name
+    print("printing layer " + name + " of type " + self.__class__.__name__)
+    weights = self.get_weights()
+    printVector(f, weights[0], name + '_weights')
+    printVector(f, weights[-1], name + '_bias')
+    activation = self.activation.__name__.upper()
+    f.write('const Conv1DLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
+            .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
+    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
+    hf.write('extern const Conv1DLayer {};\n\n'.format(name));
+    return False
+Conv1D.dump_layer = dump_conv1d_layer
+
 def dump_embedding_layer(self, f, hf):
     name = self.name
     print("printing layer " + name + " of type " + self.__class__.__name__)
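
The Conv1D.dump_layer assignment follows this file's pattern of attaching a dump method to each Keras layer class, so a driver can walk the model and dump whatever it recognizes. A hypothetical sketch of such a driver (the actual one is outside this diff):

    # Assumed names: 'model' is the loaded Keras model, 'f' is the generated
    # C source file and 'hf' the generated header, as in the functions above.
    def dump_model(model, f, hf):
        for layer in model.layers:
            if hasattr(layer, 'dump_layer'):
                layer.dump_layer(f, hf)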
dnn/nnet.c (13 changes)
@@ -128,6 +128,10 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
    compute_activation(output, output, N, layer->activation);
 }
 
+void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
+{
+}
+
 void compute_gru(const GRULayer *gru, float *state, const float *input)
 {
    int i;
@@ -146,16 +150,14 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
       z[i] = gru->bias[i];
    gemm_accum(z, gru->input_weights, N, M, stride, input);
    gemm_accum(z, gru->recurrent_weights, N, N, stride, state);
-   for (i=0;i<N;i++)
-      z[i] = sigmoid_approx(z[i]);
+   compute_activation(z, z, N, ACTIVATION_SIGMOID);
 
    /* Compute reset gate. */
    for (i=0;i<N;i++)
       r[i] = gru->bias[N + i];
    gemm_accum(r, &gru->input_weights[N], N, M, stride, input);
    gemm_accum(r, &gru->recurrent_weights[N], N, N, stride, state);
-   for (i=0;i<N;i++)
-      r[i] = sigmoid_approx(r[i]);
+   compute_activation(r, r, N, ACTIVATION_SIGMOID);
 
    /* Compute output. */
    for (i=0;i<N;i++)
@@ -174,8 +176,9 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
    gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input);
    gemm_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp);
    }
+   compute_activation(h, h, N, gru->activation);
    for (i=0;i<N;i++)
-      h[i] = z[i]*state[i] + (1-z[i])*tansig_approx(h[i]);
+      h[i] = z[i]*state[i] + (1-z[i])*h[i];
    for (i=0;i<N;i++)
       state[i] = h[i];
 }
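
With this change every gate nonlinearity in compute_gru goes through compute_activation instead of inlined sigmoid_approx/tansig_approx loops. The function evaluates the standard GRU recurrence; a numpy reference of the same computation, assuming the [z | r | h] block layout that the [N] and [2*N] weight offsets above suggest:

    import numpy as np

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def gru_step(state, x, W_in, W_rec, b, activation=np.tanh):
        # W_in: (3N, M), W_rec: (3N, N), b: (3N,), blocks ordered [z | r | h].
        N = state.shape[0]
        z = sigmoid(b[:N] + W_in[:N] @ x + W_rec[:N] @ state)            # update gate
        r = sigmoid(b[N:2*N] + W_in[N:2*N] @ x + W_rec[N:2*N] @ state)   # reset gate
        # Candidate state; the reset gate is applied to the previous state
        # before the recurrent product, matching the 'tmp' buffer in the C code.
        h = activation(b[2*N:] + W_in[2*N:] @ x + W_rec[2*N:] @ (r * state))
        return z * state + (1 - z) * h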
dnn/nnet.h (2 changes)

@@ -84,6 +84,8 @@ void compute_activation(float *output, float *input, int N, int activation);
 
 void compute_dense(const DenseLayer *layer, float *output, const float *input);
 
+void compute_mdense(const MDenseLayer *layer, float *output, const float *input);
+
 void compute_gru(const GRULayer *gru, float *state, const float *input);
 
 void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);
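
The compute_conv1d prototype takes a separate mem buffer, which suggests the convolution is meant to run in streaming fashion, with mem carrying the last kernel_size-1 input frames between calls. A numpy sketch of that idea, purely as an assumption about the intended semantics:

    import numpy as np

    def conv1d_step(kernel, bias, mem, frame):
        # kernel: (kernel_size, n_in, n_out); mem: (kernel_size-1, n_in)
        # history; frame: (n_in,) current input frame.
        window = np.vstack([mem, frame])              # (kernel_size, n_in)
        out = np.einsum('ki,kio->o', window, kernel) + bias
        return out, window[1:]                        # drop the oldest frame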