Mirror of https://github.com/xiph/opus.git
moving hyper-parameters to new_lpcnet_model() arguments

more cleaning up too

parent 5ff58fa117, commit db7569c3da

3 changed files with 11 additions and 15 deletions
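
In practice, the network sizes are now chosen when the model is built and read back from attributes stored on the returned Keras model, instead of being imported as module-level globals from the lpcnet module. A minimal sketch of the new calling pattern, using only what the diff below shows (the values are just the defaults carried over from the old globals):

import numpy as np
import lpcnet

# The sizes are now keyword arguments of new_lpcnet_model(); the defaults
# match the old module-level constants.
model, enc, dec = lpcnet.new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features=38)

# The chosen sizes are stashed on the model object, so downstream scripts
# read model.* attributes rather than lpcnet.* globals.
state1 = np.zeros((1, model.rnn_units1), dtype='float32')
state2 = np.zeros((1, model.rnn_units2), dtype='float32')
nb_used_features = model.nb_used_features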

@@ -11,12 +11,9 @@ import numpy as np
 import h5py
 import sys
 
-rnn_units1=384
-rnn_units2=16
 pcm_bits = 8
 embed_size = 128
 pcm_levels = 2**pcm_bits
-nb_used_features = 38
 
 class Sparsify(Callback):
     def __init__(self, t_start, t_end, interval, density):

@@ -88,7 +85,7 @@ class PCMInit(Initializer):
             'seed': self.seed
         }
 
-def new_wavernn_model():
+def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38):
     pcm = Input(shape=(None, 2))
     exc = Input(shape=(None, 1))
     feat = Input(shape=(None, nb_used_features))

@@ -127,6 +124,10 @@ def new_wavernn_model():
     ulaw_prob = md(gru_out2)
 
     model = Model([pcm, exc, feat, pitch], ulaw_prob)
+    model.rnn_units1 = rnn_units1
+    model.rnn_units2 = rnn_units2
+    model.nb_used_features = nb_used_features
+
     encoder = Model([feat, pitch], cfeat)
 
     dec_rnn_in = Concatenate()([cpcm, cexc, dec_feat])

@@ -15,11 +15,7 @@ config = tf.ConfigProto()
 config.gpu_options.per_process_gpu_memory_fraction = 0.2
 set_session(tf.Session(config=config))
-
-nb_epochs = 40
-batch_size = 64
-
 #model = wavenet.new_wavenet_model(fftnet=True)
-model, enc, dec = lpcnet.new_wavernn_model()
+model, enc, dec = lpcnet.new_lpcnet_model()
 
 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
 #model.summary()

@@ -27,7 +23,7 @@ model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
 feature_file = sys.argv[1]
 frame_size = 160
 nb_features = 55
-nb_used_features = lpcnet.nb_used_features
+nb_used_features = model.nb_used_features
 
 features = np.fromfile(feature_file, dtype='float32')
 features = np.resize(features, (-1, nb_features))

@@ -48,8 +44,8 @@ order = 16
 pcm = np.zeros((nb_frames*pcm_chunk_size, ))
 fexc = np.zeros((1, 1, 2), dtype='float32')
 iexc = np.zeros((1, 1, 1), dtype='int16')
-state1 = np.zeros((1, lpcnet.rnn_units1), dtype='float32')
-state2 = np.zeros((1, lpcnet.rnn_units2), dtype='float32')
+state1 = np.zeros((1, model.rnn_units1), dtype='float32')
+state2 = np.zeros((1, model.rnn_units2), dtype='float32')
 
 mem = 0
 coef = 0.85

@@ -28,8 +28,7 @@ nb_epochs = 40
 # Try reducing batch_size if you run out of memory on your GPU
 batch_size = 64
 
-# Note we are creating a LPCNet model
-model, _, _ = lpcnet.new_wavernn_model()
+model, _, _ = lpcnet.new_lpcnet_model()
 
 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
 model.summary()

@@ -40,7 +39,7 @@ pred_file = sys.argv[3] # LPC predictor samples. Not used at present, see bel
 pcm_file = sys.argv[4] # 16 bit unsigned short PCM samples
 frame_size = 160
 nb_features = 55
-nb_used_features = lpcnet.nb_used_features
+nb_used_features = model.nb_used_features
 feature_chunk_size = 15
 pcm_chunk_size = frame_size*feature_chunk_size
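
To train with non-default sizes under the new interface, the overrides are simply passed to the builder and the rest of the script stays as in the diff above; a hypothetical example (the 640-unit value is made up for illustration and is not part of this commit):

model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])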