More meaningful names

Jean-Marc Valin 2018-11-23 19:51:34 -05:00
parent b9cd61be8b
commit b0c61158f7
2 changed files with 13 additions and 13 deletions

dump_lpcnet.py

@@ -32,7 +32,7 @@ def printVector(f, vector, name):
 
 def dump_layer_ignore(self, f, hf):
     print("ignoring layer " + self.name + " of type " + self.__class__.__name__)
-    False
+    return False
 Layer.dump_layer = dump_layer_ignore
 
 def dump_gru_layer(self, f, hf):
@@ -55,7 +55,7 @@ def dump_gru_layer(self, f, hf):
             .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation, reset_after))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
     hf.write('extern const GRULayer {};\n\n'.format(name));
-    True
+    return True
 CuDNNGRU.dump_layer = dump_gru_layer
 GRU.dump_layer = dump_gru_layer
@@ -74,7 +74,7 @@ def dump_dense_layer(self, f, hf):
             .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
     hf.write('extern const DenseLayer {};\n\n'.format(name));
-    False
+    return False
 Dense.dump_layer = dump_dense_layer
 
 def dump_mdense_layer(self, f, hf):
@@ -93,7 +93,7 @@ def dump_mdense_layer(self, f, hf):
             .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
     hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
-    False
+    return False
 MDense.dump_layer = dump_mdense_layer
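
For context: this script monkey-patches a dump_layer method onto each Keras layer class, so a single loop over model.layers can dispatch on layer type, and the boolean result tells the caller whether the layer was serialized. The bare False/True lines in the old code were no-op expressions (the functions actually returned None), which is what the added return statements fix. Below is a minimal sketch of that dispatch pattern, assuming Keras 2.x-era imports like the script itself; the driver loop, the Sequential stand-in model, and the output file names are illustrative, not part of this commit.

    # Sketch of the dump_layer dispatch; the model and file names here
    # are illustrative assumptions, not part of the commit.
    from keras.models import Sequential
    from keras.layers import Layer, Dense

    def dump_layer_ignore(self, f, hf):
        # Fallback for layer types the script does not know how to serialize.
        print("ignoring layer " + self.name + " of type " + self.__class__.__name__)
        return False   # the old bare `False` was a no-op, so callers saw None

    Layer.dump_layer = dump_layer_ignore

    def dump_dense_layer(self, f, hf):
        # A real dumper writes the weights to f and C declarations to hf.
        hf.write('extern const DenseLayer {};\n\n'.format(self.name))
        return False

    Dense.dump_layer = dump_dense_layer

    model = Sequential([Dense(4, input_shape=(3,), name='example_dense')])
    with open('example.c', 'w') as f, open('example.h', 'w') as hf:
        for layer in model.layers:
            layer.dump_layer(f, hf)   # resolves to the most specific patch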

lpcnet.py

@@ -94,12 +94,12 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38):
     dec_state1 = Input(shape=(rnn_units1,))
     dec_state2 = Input(shape=(rnn_units2,))
-    fconv1 = Conv1D(128, 3, padding='same', activation='tanh')
-    fconv2 = Conv1D(102, 3, padding='same', activation='tanh')
+    fconv1 = Conv1D(128, 3, padding='same', activation='tanh', name='feature_conv1')
+    fconv2 = Conv1D(102, 3, padding='same', activation='tanh', name='feature_conv2')
 
-    embed = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+    embed = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_sig')
     cpcm = Reshape((-1, embed_size*2))(embed(pcm))
 
-    embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+    embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_exc')
     cexc = Reshape((-1, embed_size))(embed2(exc))
 
     pembed = Embedding(256, 64)
@@ -107,18 +107,18 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38):
     cfeat = fconv2(fconv1(cat_feat))
 
-    fdense1 = Dense(128, activation='tanh')
-    fdense2 = Dense(128, activation='tanh')
+    fdense1 = Dense(128, activation='tanh', name='feature_dense1')
+    fdense2 = Dense(128, activation='tanh', name='feature_dense2')
 
     cfeat = Add()([cfeat, cat_feat])
     cfeat = fdense2(fdense1(cfeat))
 
     rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))
 
-    rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True)
-    rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True)
+    rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True, name='gru_a')
+    rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True, name='gru_b')
 
     rnn_in = Concatenate()([cpcm, cexc, rep(cfeat)])
-    md = MDense(pcm_levels, activation='softmax')
+    md = MDense(pcm_levels, activation='softmax', name='dual_fc')
     gru_out1, _ = rnn(rnn_in)
     gru_out2, _ = rnn2(Concatenate()([gru_out1, rep(cfeat)]))
     ulaw_prob = md(gru_out2)
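
The new name= arguments are what the commit title refers to: without them, Keras auto-generates names such as cu_dnngru_1 that depend on construction order, and layer names are also the keys under which weights are stored in HDF5 checkpoints. Explicit names like gru_a and dual_fc give the dump script and other tools a stable handle. A short sketch of why that helps; the toy model below is a stand-in (GRU/Dense instead of CuDNNGRU/MDense), not LPCNet's real graph, and the weight file name is an assumption.

    # Illustrative stand-in model; 'gru_a' and 'dual_fc' are the names
    # this commit introduces, the rest is a toy graph.
    from keras.models import Model
    from keras.layers import Input, Dense, GRU

    x = Input(shape=(None, 8))
    h = GRU(16, return_sequences=True, name='gru_a')(x)
    y = Dense(4, activation='softmax', name='dual_fc')(h)
    model = Model(x, y)

    # Stable lookup by name instead of by position in model.layers:
    gru_a = model.get_layer('gru_a')
    print([w.shape for w in gru_a.get_weights()])

    # Layer names also key HDF5 checkpoints, so by-name loading keeps
    # working even if the Python variable names are refactored later:
    model.save_weights('weights.h5')
    model.load_weights('weights.h5', by_name=True)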