Starting to actually test this -- fix a few OOB reads

Jean-Marc Valin 2018-11-26 16:02:49 -05:00
parent 8d62ba067e
commit 538f25565a
3 changed files with 22 additions and 3 deletions

@@ -116,7 +116,7 @@ def dump_mdense_layer(self, f, hf):
     activation = self.activation.__name__.upper()
     max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])
     f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
-            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
+            .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
     return False
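
The swap matters because the struct initializer is positional: the fourth through sixth values must land in the C struct's size fields in declaration order. A sketch of the layout the generated code has to match, with field names and ordering assumed from LPCNet's nnet.h (not part of this diff):

/* Sketch of the struct the dumped initializer fills. Field names and
 * order are assumed from nnet.h, not shown in this commit. The swap
 * above makes the fourth initializer land in nb_inputs and the fifth
 * in nb_neurons, matching this declaration order. */
typedef struct {
  const float *bias;
  const float *input_weights;
  const float *factor;
  int nb_inputs;
  int nb_neurons;
  int nb_channels;
  int activation;
} MDenseLayer;

Since shape[0] also feeds {}_OUT_SIZE (the layer's output size), nb_neurons has to come from shape[0] and nb_inputs from shape[1], which is exactly what the corrected .format() order does.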

@@ -25,6 +25,7 @@
  */
 #include <math.h>
 #include <stdio.h>
+#include "nnet_data.h"
 #include "nnet.h"
 #include "common.h"
@@ -150,3 +151,21 @@ void lpcnet_synthesize(LPCNetState *lpcnet, short *output, const float *features
       lpcnet->last_exc = exc;
    }
 }
+
+#if 1
+#define FRAME_SIZE 160
+int main(int argc, char **argv) {
+   LPCNetState *net;
+   net = lpcnet_create();
+   while (1) {
+      float features[NB_FEATURES];
+      short pcm[FRAME_SIZE];
+      fread(features, sizeof(features[0]), NB_FEATURES, stdin);
+      if (feof(stdin)) break;
+      lpcnet_synthesize(net, pcm, features, FRAME_SIZE);
+      fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, stdout);
+   }
+   lpcnet_destroy(net);
+   return 0;
+}
+#endif
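
This test driver turns lpcnet.c into a stand-alone filter: raw float features on stdin, 16-bit PCM on stdout. The fread()/feof() pairing is safe here because a short final frame is discarded before use, but checking fread()'s return count directly avoids relying on that subtlety. A minimal sketch of that variant, assuming the same lpcnet_create()/lpcnet_synthesize()/lpcnet_destroy() API and the NB_FEATURES constant from nnet_data.h (the lpcnet.h header is hypothetical; at this point the API lives in lpcnet.c itself):

/* Sketch only: stop as soon as a full feature frame can no longer be
 * read, rather than testing feof() after the fact. */
#include <stdio.h>
#include "nnet_data.h"   /* NB_FEATURES, as included above */
#include "lpcnet.h"      /* hypothetical header exposing the API */

#define FRAME_SIZE 160

int main(void) {
   LPCNetState *net = lpcnet_create();
   float features[NB_FEATURES];
   short pcm[FRAME_SIZE];
   while (fread(features, sizeof(features[0]), NB_FEATURES, stdin) == NB_FEATURES) {
      lpcnet_synthesize(net, pcm, features, FRAME_SIZE);
      fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, stdout);
   }
   lpcnet_destroy(net);
   return 0;
}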

@@ -217,13 +217,13 @@ void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const f
    celt_assert(input != output);
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
    RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
-   RNN_COPY(tmp, input, layer->nb_inputs);
+   RNN_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
    M = layer->nb_inputs*layer->kernel_size;
    N = layer->nb_neurons;
    stride = N;
    for (i=0;i<N;i++)
       output[i] = layer->bias[i];
-   gemm_accum(output, layer->input_weights, N, M, stride, input);
+   gemm_accum(output, layer->input_weights, N, M, stride, tmp);
    compute_activation(output, output, N, layer->activation);
    RNN_COPY(mem, &tmp[layer->nb_inputs], layer->nb_inputs*(layer->kernel_size-1));
 }
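
Both hunks fix the out-of-bounds read named in the commit message. tmp is meant to hold a sliding window of kernel_size input frames: kernel_size-1 frames of history from mem followed by the newest frame. Before the fix, the newest frame was copied over the start of the history, and gemm_accum() was handed input, from which it reads M = nb_inputs*kernel_size floats even though input only holds nb_inputs of them. A toy illustration of the window layout the fix restores (stand-alone C with made-up sizes, not LPCNet code; memcpy stands in for RNN_COPY):

/* Toy sliding window for a causal conv1d:
 * tmp = [kernel_size-1 frames of history | newest frame],
 * so a matrix-vector product may safely read NB_INPUTS*KERNEL_SIZE
 * floats from tmp, while input alone holds only NB_INPUTS of them. */
#include <stdio.h>
#include <string.h>

#define NB_INPUTS 2
#define KERNEL_SIZE 3

static void conv1d_window(float *mem, float *tmp, const float *input) {
   memcpy(tmp, mem, NB_INPUTS*(KERNEL_SIZE-1)*sizeof(float));               /* history first */
   memcpy(&tmp[NB_INPUTS*(KERNEL_SIZE-1)], input, NB_INPUTS*sizeof(float)); /* newest frame after it */
   memcpy(mem, &tmp[NB_INPUTS], NB_INPUTS*(KERNEL_SIZE-1)*sizeof(float));   /* slide: drop oldest frame */
}

int main(void) {
   float mem[NB_INPUTS*(KERNEL_SIZE-1)] = {0};
   float tmp[NB_INPUTS*KERNEL_SIZE];
   const float frames[3][NB_INPUTS] = {{1, 1}, {2, 2}, {3, 3}};
   for (int t = 0; t < 3; t++) {
      conv1d_window(mem, tmp, frames[t]);
      for (int i = 0; i < NB_INPUTS*KERNEL_SIZE; i++) printf("%g ", tmp[i]);
      printf("\n");  /* after t=2 the window reads: 1 1 2 2 3 3 */
   }
   return 0;
}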