Use a single u-law embedding

Jean-Marc Valin 2019-01-21 16:52:57 -05:00
parent dc082d7c1c
commit b84a06dd08
4 changed files with 14 additions and 18 deletions
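In short: the separate embed_exc table and the separate exc model input are gone. The last output sample, the LPC prediction and the previous excitation are now the three u-law channels of a single pcm input of shape (None, 3), all looked up in the one shared embed_sig table and flattened by Reshape((-1, embed_size*3)). dump_lpcnet.py, test_lpcnet.py and train_lpcnet.py are updated to match.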

dump_lpcnet.py

@@ -240,7 +240,6 @@ W = model.get_layer('gru_a').get_weights()[0][:embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_sig', np.dot(E, W), f, hf)
 W = model.get_layer('gru_a').get_weights()[0][embed_size:2*embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_pred', np.dot(E, W), f, hf)
-E = model.get_layer('embed_exc').get_weights()[0]
 W = model.get_layer('gru_a').get_weights()[0][2*embed_size:3*embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_exc', np.dot(E, W), f, hf)
 W = model.get_layer('gru_a').get_weights()[0][3*embed_size:,:]
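For context on this file: each embed_size-wide row block of gru_a's kernel is pre-multiplied by the embedding matrix E, so each input channel's embedding contribution can be applied as a plain table lookup; with this commit the sig, pred and exc tables are all derived from the same E. A minimal numpy sketch of the identity being used (sizes are illustrative, assuming embed_size = 128 and 384 GRU units):

    import numpy as np

    rng = np.random.RandomState(0)
    E = rng.randn(256, 128)        # shared u-law embedding: 256 levels x embed_size
    W = rng.randn(128, 3*384)      # one embed_size-wide row block of gru_a's kernel
    idx = 57                       # any u-law index in 0..255
    direct = E[idx] @ W            # embedding lookup, then input transform
    table = (E @ W)[idx]           # precomputed table np.dot(E, W), then lookup
    print(np.allclose(direct, table))   # True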

lpcnet.py

@@ -114,8 +114,7 @@ class PCMInit(Initializer):
         }
 def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_gpu=True):
-    pcm = Input(shape=(None, 2))
-    exc = Input(shape=(None, 1))
+    pcm = Input(shape=(None, 3))
     feat = Input(shape=(None, nb_used_features))
     pitch = Input(shape=(None, 1))
     dec_feat = Input(shape=(None, 128))
@@ -126,9 +125,7 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_g
     fconv2 = Conv1D(128, 3, padding='same', activation='tanh', name='feature_conv2')
     embed = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_sig')
-    cpcm = Reshape((-1, embed_size*2))(embed(pcm))
-    embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_exc')
-    cexc = Reshape((-1, embed_size))(embed2(exc))
+    cpcm = Reshape((-1, embed_size*3))(embed(pcm))
     pembed = Embedding(256, 64, name='embed_pitch')
     cat_feat = Concatenate()([feat, Reshape((-1, 64))(pembed(pitch))])
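A minimal standalone sketch of the new embedding path (tf.keras is used here for convenience; embed_size is 128 in lpcnet.py): a single Embedding applied to the 3-channel u-law input yields a (batch, time, 3, embed_size) tensor, and the Reshape flattens the last two axes so each sample contributes 3*embed_size features.

    from tensorflow.keras.layers import Input, Embedding, Reshape
    from tensorflow.keras.models import Model

    embed_size = 128                                  # value used in lpcnet.py
    pcm = Input(shape=(None, 3))                      # u-law sig, pred, exc
    embed = Embedding(256, embed_size)                # one shared table
    cpcm = Reshape((-1, 3*embed_size))(embed(pcm))    # (batch, time, 3*embed_size)
    m = Model(pcm, cpcm)
    print(m.output_shape)                             # (None, None, 384)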
@@ -149,13 +146,13 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_g
     rnn = GRU(rnn_units1, return_sequences=True, return_state=True, recurrent_activation="sigmoid", reset_after='true', name='gru_a')
     rnn2 = GRU(rnn_units2, return_sequences=True, return_state=True, recurrent_activation="sigmoid", reset_after='true', name='gru_b')
-    rnn_in = Concatenate()([cpcm, cexc, rep(cfeat)])
+    rnn_in = Concatenate()([cpcm, rep(cfeat)])
     md = MDense(pcm_levels, activation='softmax', name='dual_fc')
     gru_out1, _ = rnn(rnn_in)
     gru_out2, _ = rnn2(Concatenate()([gru_out1, rep(cfeat)]))
     ulaw_prob = md(gru_out2)
-    model = Model([pcm, exc, feat, pitch], ulaw_prob)
+    model = Model([pcm, feat, pitch], ulaw_prob)
     model.rnn_units1 = rnn_units1
     model.rnn_units2 = rnn_units2
     model.nb_used_features = nb_used_features
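Note that the width of gru_a's input does not change: cpcm used to contribute 2*embed_size and cexc another embed_size, and cpcm alone now contributes 3*embed_size. Assuming embed_size = 128 and the 128-dimensional conditioning vector seen in the dec_feat input above, rnn_in stays 512 wide, which is why dump_lpcnet.py keeps slicing the gru_a kernel into the same four row blocks (sig, pred, exc, features):

    embed_size, feat_dim = 128, 128                      # values assumed from lpcnet.py
    old_width = 2*embed_size + embed_size + feat_dim     # cpcm + cexc + rep(cfeat)
    new_width = 3*embed_size + feat_dim                  # cpcm + rep(cfeat)
    print(old_width, new_width)                          # 512 512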
@@ -163,10 +160,10 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_g
     encoder = Model([feat, pitch], cfeat)
-    dec_rnn_in = Concatenate()([cpcm, cexc, dec_feat])
+    dec_rnn_in = Concatenate()([cpcm, dec_feat])
     dec_gru_out1, state1 = rnn(dec_rnn_in, initial_state=dec_state1)
     dec_gru_out2, state2 = rnn2(Concatenate()([dec_gru_out1, dec_feat]), initial_state=dec_state2)
     dec_ulaw_prob = md(dec_gru_out2)
-    decoder = Model([pcm, exc, dec_feat, dec_state1, dec_state2], [dec_ulaw_prob, state1, state2])
+    decoder = Model([pcm, dec_feat, dec_state1, dec_state2], [dec_ulaw_prob, state1, state2])
     return model, encoder, decoder
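A hypothetical usage sketch (a guess at the calling code, with names from lpcnet.py and test_lpcnet.py; the actual scripts may differ): model, encoder and decoder share layer objects, so loading trained weights into model is enough for the sampling-time decoder, which now takes four inputs instead of five.

    import numpy as np
    import lpcnet

    model, enc, dec = lpcnet.new_lpcnet_model()
    model.load_weights('lpcnet20g_384_10_G16_02.h5')     # checkpoint name from test_lpcnet.py

    # The decoder now expects [pcm, dec_feat, dec_state1, dec_state2]; the exc input is gone.
    fexc = np.zeros((1, 1, 3), dtype='int16')            # u-law sig, pred, exc for one sample
    cfeat = np.zeros((1, 1, 128), dtype='float32')       # one frame of conditioning features
    state1 = np.zeros((1, model.rnn_units1), dtype='float32')
    state2 = np.zeros((1, model.rnn_units2), dtype='float32')
    p, state1, state2 = dec.predict([fexc, cfeat, state1, state2])
    print(p.shape)                                       # (1, 1, 256) u-law probabilities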

test_lpcnet.py

@@ -63,13 +63,12 @@ periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
-model.load_weights('lpcnet20c_384_10_G16_80.h5')
+model.load_weights('lpcnet20g_384_10_G16_02.h5')
 order = 16
 pcm = np.zeros((nb_frames*pcm_chunk_size, ))
-fexc = np.zeros((1, 1, 2), dtype='float32')
-iexc = np.zeros((1, 1, 1), dtype='int16')
+fexc = np.zeros((1, 1, 3), dtype='int16')
 state1 = np.zeros((1, model.rnn_units1), dtype='float32')
 state2 = np.zeros((1, model.rnn_units2), dtype='float32')
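With the single buffer, the three fexc channels line up with the three embedded pcm channels: channel 0 holds the previous output sample (in u-law), channel 1 the u-law LPC prediction for the current sample, and channel 2 the excitation sampled at the previous step; the synthesis loop below fills them in exactly that order.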
@@ -88,7 +87,7 @@ for c in range(0, nb_frames):
             pred = -sum(a*pcm[f*frame_size + i - 1:f*frame_size + i - order-1:-1])
             fexc[0, 0, 1] = lin2ulaw(pred)
-            p, state1, state2 = dec.predict([fexc, iexc, cfeat[:, fr:fr+1, :], state1, state2])
+            p, state1, state2 = dec.predict([fexc, cfeat[:, fr:fr+1, :], state1, state2])
             #Lower the temperature for voiced frames to reduce noisiness
             p *= np.power(p, np.maximum(0, 1.5*features[c, fr, 37] - .5))
             p = p/(1e-18 + np.sum(p))
@@ -96,8 +95,8 @@ for c in range(0, nb_frames):
             p = np.maximum(p-0.002, 0).astype('float64')
             p = p/(1e-8 + np.sum(p))
-            iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
-            pcm[f*frame_size + i] = pred + ulaw2lin(iexc[0, 0, 0])
+            fexc[0, 0, 2] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
+            pcm[f*frame_size + i] = pred + ulaw2lin(fexc[0, 0, 2])
             fexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i])
             mem = coef*mem + pcm[f*frame_size + i]
             #print(mem)

train_lpcnet.py

@@ -91,14 +91,15 @@ features[:,:,18:36] = 0
 periods = (.1 + 50*features[:,:,36:37]+100).astype('int16')
-in_data = np.concatenate([sig, pred], axis=-1)
+in_data = np.concatenate([sig, pred, in_exc], axis=-1)
 del sig
 del pred
+del in_exc
 # dump models to disk as we go
 checkpoint = ModelCheckpoint('lpcnet20g_384_10_G16_{epoch:02d}.h5')
 #model.load_weights('lpcnet9b_384_10_G16_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy')
-model.fit([in_data, in_exc, features, periods], out_exc, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, (0.05, 0.05, 0.2))])
+model.fit([in_data, features, periods], out_exc, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, (0.05, 0.05, 0.2))])
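On the training side, the u-law excitation stops being a separate model input and becomes the third channel of in_data, matching the 3-channel pcm input; model.fit then passes just [in_data, features, periods]. A small numpy sketch of the packing (array names follow train_lpcnet.py; shapes and dtype are illustrative):

    import numpy as np

    nb_seq, seq_len = 4, 2400                               # illustrative sizes
    sig = np.zeros((nb_seq, seq_len, 1), dtype='uint8')     # past signal, u-law
    pred = np.zeros((nb_seq, seq_len, 1), dtype='uint8')    # LPC prediction, u-law
    in_exc = np.zeros((nb_seq, seq_len, 1), dtype='uint8')  # past excitation, u-law

    in_data = np.concatenate([sig, pred, in_exc], axis=-1)
    print(in_data.shape)                                    # (4, 2400, 3)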