From b923fd1e2811a4bf6a4ea1461a550c8d15143f01 Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin
Date: Thu, 21 Dec 2023 18:01:57 -0500
Subject: [PATCH] lossgen: better training, README.md

---
 dnn/torch/lossgen/README.md        | 27 +++++++++++++++++++++++++++
 dnn/torch/lossgen/train_lossgen.py | 13 ++++++++-----
 2 files changed, 35 insertions(+), 5 deletions(-)
 create mode 100644 dnn/torch/lossgen/README.md

diff --git a/dnn/torch/lossgen/README.md b/dnn/torch/lossgen/README.md
new file mode 100644
index 00000000..26abc9eb
--- /dev/null
+++ b/dnn/torch/lossgen/README.md
@@ -0,0 +1,27 @@
+# Packet loss simulator
+
+This code is an attempt at simulating packet loss more realistically. The most common way of simulating
+packet loss is to use a random sequence where each loss event is uncorrelated with previous events.
+That is a simplistic model, since we know that real losses often occur in bursts. This code instead
+uses real data to build a generative model for packet loss.
+
+We use the training data provided for the Audio Deep Packet Loss Concealment Challenge, which is available at:
+
+http://plcchallenge2022pub.blob.core.windows.net/plcchallengearchive/test\_train.tar.gz
+
+To create the training data, run:
+
+`./process_data.sh <path>/test_train/train/lossy_signals/`
+
+That will create an ASCII file named loss\_sorted.txt with all loss data sorted in increasing order of
+packet loss percentage. Then just run:
+
+`python ./train_lossgen.py`
+
+to train a model.
+
+To generate a sequence, run:
+
+`python3 ./test_lossgen.py <model> output.txt <percentage> --length 10000`
+
+where `<model>` is the .pth model file and `<percentage>` is the amount of loss (e.g. 0.2 for 20% loss).
diff --git a/dnn/torch/lossgen/train_lossgen.py b/dnn/torch/lossgen/train_lossgen.py
index 26e0f012..4dda3190 100644
--- a/dnn/torch/lossgen/train_lossgen.py
+++ b/dnn/torch/lossgen/train_lossgen.py
@@ -27,9 +27,11 @@ class LossDataset(torch.utils.data.Dataset):
         return self.nb_sequences
 
     def __getitem__(self, index):
-        r0 = np.random.normal(scale=.02, size=(1,1)).astype('float32')
-        r1 = np.random.normal(scale=.02, size=(self.sequence_length,1)).astype('float32')
-        return [self.loss[index, :, :], self.perc[index, :, :]+r0+r1]
+        r0 = np.random.normal(scale=.1, size=(1,1)).astype('float32')
+        r1 = np.random.normal(scale=.1, size=(self.sequence_length,1)).astype('float32')
+        perc = self.perc[index, :, :]
+        perc = perc + (r0+r1)*perc*(1-perc)
+        return [self.loss[index, :, :], perc]
 
 
 adam_betas = [0.8, 0.98]
@@ -61,7 +63,7 @@ scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lam
 
 if __name__ == '__main__':
     model.to(device)
-
+    states = None
     for epoch in range(1, epochs + 1):
         running_loss = 0
 
@@ -73,7 +75,8 @@ if __name__ == '__main__':
             loss = loss.to(device)
             perc = perc.to(device)
 
-            out, _ = model(loss, perc)
+            out, states = model(loss, perc, states=states)
+            states = [state.detach() for state in states]
             out = torch.sigmoid(out[:,:-1,:])
             target = loss[:,1:,:]
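
The key training change is the new data augmentation in `__getitem__`: instead of adding raw Gaussian noise to the conditioning loss percentage, the noise is scaled by `perc*(1-perc)`, so the perturbation shrinks to zero as the percentage approaches 0 or 1 (for a noise magnitude below 1 the perturbed value provably stays in [0, 1], since `p + eps*p*(1-p)` is monotone in `p` for `|eps| < 1`). A minimal standalone sketch of that augmentation; the shapes and the constant test sequence are illustrative assumptions, not taken from the repo:

```python
import numpy as np

def augment_percentage(perc, sequence_length, scale=0.1):
    """Jitter a per-step loss-percentage feature as in the patched __getitem__.

    r0 is a single offset shared by the whole sequence; r1 is independent
    per-step noise. Scaling by perc*(1-perc) makes the perturbation vanish
    near 0% and 100% loss, so the jittered value remains a valid probability.
    """
    r0 = np.random.normal(scale=scale, size=(1, 1)).astype('float32')
    r1 = np.random.normal(scale=scale, size=(sequence_length, 1)).astype('float32')
    return perc + (r0 + r1) * perc * (1 - perc)

# Illustrative usage: jitter a constant 20%-loss conditioning sequence
perc = np.full((16, 1), 0.2, dtype='float32')
print(augment_percentage(perc, sequence_length=16)[:4, 0])
```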
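
The other change threads the recurrent `states` through the whole training run and detaches them between batches. That is the usual truncated-backpropagation-through-time pattern: each batch resumes from the hidden state left by the previous one, while `detach()` stops gradients from flowing across the batch boundary. A minimal sketch of the same pattern with a plain `torch.nn.GRU`; the toy network and loss are assumptions standing in for the actual lossgen model, which is defined elsewhere in the repo:

```python
import torch

# Toy recurrent model standing in for the lossgen network
gru = torch.nn.GRU(input_size=2, hidden_size=16, num_layers=2, batch_first=True)
opt = torch.optim.Adam(gru.parameters(), betas=(0.8, 0.98))

state = None  # like `states = None` before the epoch loop in the patch
for step in range(100):
    x = torch.randn(8, 32, 2)      # (batch, time, features)
    out, state = gru(x, state)     # resume from the previous hidden state
    state = state.detach()         # truncated BPTT: keep values, drop the graph
    loss = out.pow(2).mean()       # placeholder loss
    opt.zero_grad()
    loss.backward()
    opt.step()
```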
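
Once trained, the generator is used autoregressively, which is what `test_lossgen.py` drives: the sigmoid of the model output (the same `torch.sigmoid(out[:,:-1,:])` as in the training loop, predicting each step's loss flag from the previous ones) gives the probability that the next packet is lost given the loss history and the target percentage, and the next flag is sampled from it. A hedged sketch of such a sampling loop; the `model(loss, perc, states=states)` interface is inferred from the training code, and the real `test_lossgen.py` may differ:

```python
import torch

def sample_loss_sequence(model, percentage, length=10000):
    """Sample a 0/1 packet-loss sequence at a target loss rate.

    Assumes model(loss, perc, states=...) -> (logits, states), matching the
    call in train_lossgen.py, with tensors shaped (batch, time, 1).
    """
    loss = torch.zeros(1, 1, 1)               # previous loss flag
    perc = torch.full((1, 1, 1), percentage)  # conditioning loss percentage
    states, out = None, []
    with torch.no_grad():
        for _ in range(length):
            logits, states = model(loss, perc, states=states)
            p = torch.sigmoid(logits[:, -1:, :])  # P(next packet lost)
            loss = torch.bernoulli(p)             # sample the next flag
            out.append(int(loss.item()))
    return out
```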