diff --git a/dnn/lpcnet.py b/dnn/lpcnet.py
index 75953065..e58874d4 100644
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -64,12 +64,12 @@ class Sparsify(Callback):
             N = p.shape[0]
             #print("nb = ", nb, ", N = ", N);
             #print(p.shape)
-            density = self.final_density
-            if self.batch < self.t_end:
-                r = 1 - (self.batch-self.t_start)/(self.t_end - self.t_start)
-                density = 1 - (1-self.final_density)*(1 - r*r*r)
             #print ("density = ", density)
             for k in range(nb):
+                density = self.final_density[k]
+                if self.batch < self.t_end:
+                    r = 1 - (self.batch-self.t_start)/(self.t_end - self.t_start)
+                    density = 1 - (1-self.final_density[k])*(1 - r*r*r)
                 A = p[:, k*N:(k+1)*N]
                 A = A - np.diag(np.diag(A))
                 A = np.transpose(A, (1, 0))
diff --git a/dnn/train_lpcnet.py b/dnn/train_lpcnet.py
index 7000df13..81d7f5a1 100755
--- a/dnn/train_lpcnet.py
+++ b/dnn/train_lpcnet.py
@@ -147,4 +147,4 @@ checkpoint = ModelCheckpoint('lpcnet15_384_10_G16_{epoch:02d}.h5')
 
 #model.load_weights('lpcnet9b_384_10_G16_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, 0.1)])
+model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, (0.1, 0.1, 0.1))])
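
For context: the lpcnet.py change moves the density schedule inside the per-gate loop, so each block of the recurrent weight matrix (one block per GRU gate) can be ramped toward its own target density, and train_lpcnet.py now passes a tuple of three densities instead of a single scalar. Below is a minimal standalone sketch of that schedule; the density_at helper and the probe batch number are hypothetical and only illustrate the same cubic ramp used by the Sparsify callback, they are not part of the patch.

# Standalone sketch of the per-gate density schedule used in Sparsify.
# t_start/t_end and the target densities mirror the values passed in
# train_lpcnet.py; density_at and the probe batch are illustrative only.
t_start, t_end = 2000, 40000
final_density = (0.1, 0.1, 0.1)   # one target density per GRU gate block

def density_at(batch, k):
    # Before t_end, ramp from fully dense (1.0) down to the per-gate
    # target using the same cubic interpolation as the callback.
    density = final_density[k]
    if batch < t_end:
        r = 1 - (batch - t_start) / (t_end - t_start)
        density = 1 - (1 - final_density[k]) * (1 - r*r*r)
    return density

for k in range(len(final_density)):
    print(k, density_at(21000, k))  # roughly halfway through the ramp

With (0.1, 0.1, 0.1) the behaviour is the same as the old scalar 0.1; the tuple simply makes it possible to keep some gates denser than others.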