diff --git a/dnn/lpcnet.c b/dnn/lpcnet.c
index 80b6d790..d9c928fb 100644
--- a/dnn/lpcnet.c
+++ b/dnn/lpcnet.c
@@ -154,7 +154,7 @@ void lpcnet_synthesize(LPCNetState *lpcnet, short *output, const float *features
     /* FIXME: Remove this -- it's just a temporary hack to match the Python code. */
     static int start = LPC_ORDER+1;
     /* FIXME: Do proper rounding once the Python code rounds properly. */
-    pitch = (int)floor(50*features[36]+100);
+    pitch = (int)floor(.1 + 50*features[36]+100);
     /* FIXME: get the pitch gain from 2 frames in the past. */
     pitch_gain = features[PITCH_GAIN_FEATURE];
     run_frame_network(lpcnet, condition, gru_a_condition, features, pitch);
diff --git a/dnn/test_lpcnet.py b/dnn/test_lpcnet.py
index d1910f44..3f52fb3f 100755
--- a/dnn/test_lpcnet.py
+++ b/dnn/test_lpcnet.py
@@ -59,7 +59,7 @@ pcm_chunk_size = frame_size*feature_chunk_size
 features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
 features[:,:,18:36] = 0
-periods = (50*features[:,:,36:37]+100).astype('int16')
+periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
diff --git a/dnn/train_lpcnet.py b/dnn/train_lpcnet.py
index a9275045..7000df13 100755
--- a/dnn/train_lpcnet.py
+++ b/dnn/train_lpcnet.py
@@ -136,14 +136,14 @@ features[:,:,18:36] = 0
 pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
 pred = pred.astype('uint8')
-periods = (50*features[:,:,36:37]+100).astype('int16')
+periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
 in_data = np.concatenate([in_data, pred], axis=-1)
 del pred
 # dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet14_384_10_G16_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('lpcnet15_384_10_G16_{epoch:02d}.h5')
 #model.load_weights('lpcnet9b_384_10_G16_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
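
A minimal sketch of why the .1 offset is added before truncation: if the scaled pitch value lands a hair below an integer due to floating-point error, plain floor()/astype('int16') truncates one period too low, while the offset recovers the intended value. The feature value below is hypothetical, chosen only to illustrate the effect; it is not taken from the patch.

import numpy as np

# Hypothetical pitch feature: the intended period is 156, but float32
# arithmetic leaves the scaled value just below 156.0.
feature = np.float32((156 - 100) / 50) - np.float32(1e-7)

period_plain  = int(np.floor(50 * feature + 100))        # truncates down to 155
period_offset = int(np.floor(0.1 + 50 * feature + 100))  # recovers 156

print(period_plain, period_offset)  # 155 156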