mirror of
https://github.com/xiph/opus.git
synced 2025-05-17 17:08:29 +00:00
Avoid rounding the prediction multiple times
This commit is contained in:
parent
3e71248298
commit
2facc08bb4
1 changed file with 3 additions and 3 deletions
|
@ -94,7 +94,7 @@ features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
|
|||
# Note: the LPC predictor output is now calculated by the loop below, this code was
|
||||
# for an earlier version that implemented the prediction filter in C
|
||||
|
||||
upred = np.zeros((nb_frames*pcm_chunk_size,), dtype='int16')
|
||||
upred = np.zeros((nb_frames*pcm_chunk_size,), dtype='float32')
|
||||
|
||||
# Use 16th order LPC to generate LPC prediction output upred[] and (in
|
||||
# mu-law form) pred[]
|
||||
|
@ -136,8 +136,8 @@ periods = (50*features[:,:,36:37]+100).astype('int16')
|
|||
in_data = np.concatenate([in_data, pred], axis=-1)
|
||||
|
||||
# dump models to disk as we go
|
||||
checkpoint = ModelCheckpoint('lpcnet9c_384_10_G16_{epoch:02d}.h5')
|
||||
checkpoint = ModelCheckpoint('lpcnet14_384_10_G16_{epoch:02d}.h5')
|
||||
|
||||
#model.load_weights('wavenet4f2_30.h5')
|
||||
#model.load_weights('lpcnet9b_384_10_G16_01.h5')
|
||||
model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
|
||||
model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, 0.1)])
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue