first wavenet implementation
parent 374ba430c4
commit f50058f3e3
3 changed files with 171 additions and 0 deletions

dnn/causalconv.py (Normal file, 52 additions)
@@ -0,0 +1,52 @@
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import activations, initializers, regularizers, constraints, InputSpec, Conv1D
import numpy as np


class CausalConv(Conv1D):
    """1D convolution made causal by left-padding, with optional state carry-over."""

    def __init__(self, filters,
                 kernel_size,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 return_memory=False,
                 **kwargs):

        super(CausalConv, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=1,
            padding='valid',
            data_format='channels_last',
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs)
        # Number of past samples each output depends on.
        self.mem_size = dilation_rate*(kernel_size-1)
        self.return_memory = return_memory

    def call(self, inputs, memory=None):
        # Prepend the memory (zeros on the first call) so that the 'valid'
        # convolution becomes causal and preserves the input length.
        if memory is None:
            mem = K.zeros((K.shape(inputs)[0], self.mem_size, K.shape(inputs)[-1]))
        else:
            mem = K.variable(K.cast_to_floatx(memory))
        inputs = K.concatenate([mem, inputs], axis=1)
        ret = super(CausalConv, self).call(inputs)
        if self.return_memory:
            ret = ret, inputs[:, :self.mem_size, :]
        return ret
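
The zero left-padding of mem_size = dilation_rate*(kernel_size-1) samples is what makes the convolution causal: each output depends only on the current and past inputs, and the output keeps the input's length. A minimal usage sketch (not part of the commit, assuming the same Keras 2.x environment):

# Sketch: CausalConv with kernel_size=2 and dilation_rate=4.
from keras.models import Model
from keras.layers import Input
from causalconv import CausalConv

x = Input(shape=(None, 1))                  # variable-length, single-channel signal
y = CausalConv(16, 2, dilation_rate=4)(x)   # each y[t] sees only x[t] and x[t-4]
m = Model(x, y)
# mem_size = 4*(2-1) = 4 zeros are prepended, and the 'valid' convolution
# then consumes exactly mem_size samples, so the output length matches the input.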

dnn/train_wavenet.py (Executable file, 69 additions)
@@ -0,0 +1,69 @@
#!/usr/bin/python3

import wavenet
import sys
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from ulaw import ulaw2lin, lin2ulaw
import keras.backend as K
import h5py

#import tensorflow as tf
#from keras.backend.tensorflow_backend import set_session
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.44
#set_session(tf.Session(config=config))

nb_epochs = 40
batch_size = 32

model = wavenet.new_wavenet_model()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()

pcmfile = sys.argv[1]
feature_file = sys.argv[2]
frame_size = 160
nb_features = 54
nb_used_features = wavenet.nb_used_features
feature_chunk_size = 15
pcm_chunk_size = frame_size*feature_chunk_size

# u-law PCM, one signed byte per sample
data = np.fromfile(pcmfile, dtype='int8')
nb_frames = len(data)//pcm_chunk_size

features = np.fromfile(feature_file, dtype='float32')

data = data[:nb_frames*pcm_chunk_size]
features = features[:nb_frames*feature_chunk_size*nb_features]

# Network input is the previous sample, scaled down.
in_data = np.concatenate([data[0:1], data[:-1]])/16.

features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
# Build a pitch-delayed copy of the signal from the pitch feature.
pitch = 1.*data
pitch[:320] = 0
for i in range(2, nb_frames*feature_chunk_size):
    period = int(50*features[i,36]+100)
    period = period - 4
    pitch[i*frame_size:(i+1)*frame_size] = data[i*frame_size-period:(i+1)*frame_size-period]
in_pitch = np.reshape(pitch/16., (nb_frames, pcm_chunk_size, 1))

in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
out_data = np.reshape(data, (nb_frames, pcm_chunk_size, 1))
# Shift u-law bytes from [-128, 127] to [0, 255] class indices.
out_data = (out_data.astype('int16')+128).astype('uint8')
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
features = features[:, :, :nb_used_features]


#in_data = np.concatenate([in_data, in_pitch], axis=-1)

#with h5py.File('in_data.h5', 'w') as f:
#    f.create_dataset('data', data=in_data[:50000, :, :])
#    f.create_dataset('feat', data=features[:50000, :, :])

checkpoint = ModelCheckpoint('wavenet3a_{epoch:02d}.h5')

#model.load_weights('wavernn1c_01.h5')
model.compile(optimizer=Adam(0.001, amsgrad=True, decay=2e-4), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.fit([in_data, features], out_data, batch_size=batch_size, epochs=30, validation_split=0.2, callbacks=[checkpoint])
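
The PCM file is read as one signed byte per sample, i.e. 8-bit mu-law data: the bytes are used directly as the 256 target classes (shifted to 0..255), and divided by 16 to form the network input. The script also imports ulaw2lin/lin2ulaw from the repo's ulaw module; for reference, a sketch of the standard mu-law companding pair such a module implements (hypothetical function names, assuming mu = 255; the repo's version may scale its inputs and outputs differently):

import numpy as np

def lin2ulaw_sketch(x, mu=255.0):
    # Compand a linear sample x in [-1, 1] to a mu-law value in [-1, 1].
    return np.sign(x) * np.log1p(mu*np.abs(x)) / np.log1p(mu)

def ulaw2lin_sketch(u, mu=255.0):
    # Inverse mapping: expand a mu-law value in [-1, 1] back to linear.
    return np.sign(u) * ((1.0 + mu)**np.abs(u) - 1.0) / mu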

dnn/wavenet.py (Normal file, 50 additions)
@@ -0,0 +1,50 @@
#!/usr/bin/python3

import math
from keras.models import Model
from keras.layers import Input, LSTM, CuDNNGRU, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D, Add, Multiply, Bidirectional, MaxPooling1D, Activation
from keras import backend as K
from mdense import MDense
import numpy as np
import h5py
import sys
from causalconv import CausalConv

units = 128
pcm_bits = 8
pcm_levels = 2**pcm_bits
nb_used_features = 38


def new_wavenet_model():
    pcm = Input(shape=(None, 1))
    pitch = Input(shape=(None, 1))
    feat = Input(shape=(None, nb_used_features))
    dec_feat = Input(shape=(None, 32))

    # Condition on the frame-rate features through two small convolutions.
    fconv1 = Conv1D(128, 3, padding='same', activation='tanh')
    fconv2 = Conv1D(32, 3, padding='same', activation='tanh')

    cfeat = fconv2(fconv1(feat))

    # Upsample the conditioning features to the sample rate (160 samples per frame).
    rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))

    activation = 'tanh'
    rfeat = rep(cfeat)
    #tmp = Concatenate()([pcm, rfeat])
    tmp = pcm
    # Stack of 10 gated causal convolutions with dilations 1, 2, 4, ..., 512.
    for k in range(10):
        res = tmp
        tmp = Concatenate()([tmp, rfeat])
        c1 = CausalConv(units, 2, dilation_rate=2**k, activation='tanh')
        c2 = CausalConv(units, 2, dilation_rate=2**k, activation='sigmoid')
        # Gated activation: tanh branch modulated by a sigmoid gate.
        tmp = Multiply()([c1(tmp), c2(tmp)])
        tmp = Dense(units, activation='relu')(tmp)
        if k != 0:
            tmp = Add()([tmp, res])

    md = MDense(pcm_levels, activation='softmax')
    ulaw_prob = md(tmp)

    model = Model([pcm, feat], ulaw_prob)
    return model
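
The Multiply() of the tanh and sigmoid branches is the WaveNet gated activation unit, z = tanh(W_f * x) * sigmoid(W_g * x), with the sigmoid branch acting as a learned gate on the tanh branch. With kernel size 2 and dilation 2**k, each of the 10 layers adds 2**k samples of past context, so the stack's receptive field grows geometrically. An illustrative check (not part of the commit):

# Receptive field of the 10-layer dilated stack above.
receptive_field = 1 + sum(2**k for k in range(10))
print(receptive_field)  # 1024 samples, i.e. 64 ms assuming 16 kHz input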