Remove useless (and possibly hurtful) residual connection

I guess it's a bad idea to forward inputs directly
Jean-Marc Valin 2019-01-17 23:17:42 -05:00
parent 4698b28345
commit 38cd5cf08f
4 changed files with 3 additions and 5 deletions
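
In Keras terms, the diff below drops the Add() that fed the raw conditioning input back into the output of the feature conv stack, and widens feature_conv2 from 102 to 128 channels now that its output no longer has to match the input width. A minimal sketch of just that piece of the network, not the full LPCNet model (the 102 = 38 features + 64-dim pitch embedding split is an assumption, and tensorflow.keras stands in for the repo's keras imports):

import numpy as np
from tensorflow.keras.layers import Add, Conv1D, Dense, Input
from tensorflow.keras.models import Model

FRAME_INPUT_SIZE = 102   # assumed: 38 acoustic features + 64-dim pitch embedding

cat_feat = Input(shape=(None, FRAME_INPUT_SIZE))

# Before this commit: feature_conv2 had to output 102 channels so that the
# residual Add() with the raw frame input was shape-compatible.
fconv1_old = Conv1D(128, 3, padding='same', activation='tanh')
fconv2_old = Conv1D(102, 3, padding='same', activation='tanh')
cfeat_old = Add()([fconv2_old(fconv1_old(cat_feat)), cat_feat])

# After this commit: no residual, so feature_conv2 can output 128 channels like
# the layers around it.
fconv1_new = Conv1D(128, 3, padding='same', activation='tanh')
fconv2_new = Conv1D(128, 3, padding='same', activation='tanh')
cfeat_new = fconv2_new(fconv1_new(cat_feat))

fdense1 = Dense(128, activation='tanh')
fdense2 = Dense(128, activation='tanh')

old_net = Model(cat_feat, fdense2(fdense1(cfeat_old)))
new_net = Model(cat_feat, fdense2(fdense1(cfeat_new)))

# Both variants take the same (batch, frames, 102) conditioning input.
x = np.zeros((1, 15, FRAME_INPUT_SIZE), dtype='float32')
print(old_net.predict(x, verbose=0).shape)   # (1, 15, 128)
print(new_net.predict(x, verbose=0).shape)   # (1, 15, 128)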

@@ -81,7 +81,6 @@ void run_frame_network(LPCNetState *lpcnet, float *condition, float *gru_a_condi
 compute_conv1d(&feature_conv2, conv2_out, net->feature_conv2_state, conv1_out);
 celt_assert(FRAME_INPUT_SIZE == FEATURE_CONV2_OUT_SIZE);
 if (lpcnet->frame_count < FEATURES_DELAY) RNN_CLEAR(conv2_out, FEATURE_CONV2_OUT_SIZE);
-for (i=0;i<FEATURE_CONV2_OUT_SIZE;i++) conv2_out[i] += lpcnet->old_input[FEATURES_DELAY-1][i];
 memmove(lpcnet->old_input[1], lpcnet->old_input[0], (FEATURES_DELAY-1)*FRAME_INPUT_SIZE*sizeof(in[0]));
 memcpy(lpcnet->old_input[0], in, FRAME_INPUT_SIZE*sizeof(in[0]));
 compute_dense(&feature_dense1, dense1_out, conv2_out);
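
For context, the deleted loop above was the inference-time counterpart of the Keras residual: it added the FEATURES_DELAY-frames-old raw frame input element-wise onto the second conv layer's output. A rough NumPy sketch of that single operation (the constants are assumptions consistent with the celt_assert above, not values read from the headers):

import numpy as np

FEATURE_CONV2_OUT_SIZE = 102   # assumed equal to FRAME_INPUT_SIZE, as the celt_assert requires
FEATURES_DELAY = 2             # assumed: one frame of lookahead per 'same'-padded width-3 conv

conv2_out = np.random.randn(FEATURE_CONV2_OUT_SIZE).astype(np.float32)
old_input = np.random.randn(FEATURES_DELAY, FEATURE_CONV2_OUT_SIZE).astype(np.float32)

# What the removed line computed: fold the delayed raw input back into the conv output.
conv2_out += old_input[FEATURES_DELAY - 1]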

@@ -122,7 +122,7 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_g
 dec_state2 = Input(shape=(rnn_units2,))
 fconv1 = Conv1D(128, 3, padding='same', activation='tanh', name='feature_conv1')
-fconv2 = Conv1D(102, 3, padding='same', activation='tanh', name='feature_conv2')
+fconv2 = Conv1D(128, 3, padding='same', activation='tanh', name='feature_conv2')
 embed = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_sig')
 cpcm = Reshape((-1, embed_size*2))(embed(pcm))
@@ -137,7 +137,6 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_g
 fdense1 = Dense(128, activation='tanh', name='feature_dense1')
 fdense2 = Dense(128, activation='tanh', name='feature_dense2')
-cfeat = Add()([cfeat, cat_feat])
 cfeat = fdense2(fdense1(cfeat))
 rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))

@@ -63,7 +63,7 @@ periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
-model.load_weights('lpcnet9_384_10_G16_120.h5')
+model.load_weights('lpcnet20c_384_10_G16_80.h5')
 order = 16

@@ -97,7 +97,7 @@ del sig
 del pred
 # dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet20_384_10_G16_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('lpcnet20c_384_10_G16_{epoch:02d}.h5')
 #model.load_weights('lpcnet9b_384_10_G16_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy')