In [8]:
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import nltk
import os
import bs4
import random

# Fix RNG seeds so the train/validation split and weight init are reproducible.
torch.manual_seed(12345)
random.seed(12345)

# NOTE(review): hardcoded absolute path — consider an env var or config constant.
nltk.data.path.append("/home/david/Programming/data/nltk_data")

In [25]:
def concat_contractions(tokens):
    """Fold contraction suffix tokens into the word that precedes them.

    NLTK's tokenizer splits contractions, e.g. ["do", "n't"] — this merges
    them back to ["don't"].  Suffix tokens themselves are dropped from the
    result; a trailing suffix with nothing after it is simply consumed.
    """
    suffixes = {"'ve", "'d", "'m", "'ll", "'re", "n't"}
    merged = []
    for pos, tok in enumerate(tokens):
        if tok in suffixes:
            # Already glued onto the previous surviving token; skip it.
            continue
        nxt = tokens[pos + 1] if pos + 1 < len(tokens) else None
        merged.append(tok + nxt if nxt in suffixes else tok)
    return merged

def data_processing(ds_paths, max_len=500, split_ratio=1.0):
    """Tokenize IMDB review files and optionally split into train/validation.

    Each path is expected to be named ``<index>_<rating>.txt``; only the
    rating is used.  Review text is stripped of HTML, tokenized with nltk,
    run through concat_contractions, mapped through the global ``vocab``
    (out-of-vocabulary words are dropped), then truncated or right-padded
    with 0 to exactly ``max_len`` ids.

    Args:
        ds_paths: iterable of review file paths.
        max_len: fixed output token-sequence length.
        split_ratio: fraction of examples kept for training; 1.0 = no split.

    Returns:
        ((train_tokens, train_labels), (valid_tokens, valid_labels)); the
        validation pair is (None, None) when split_ratio == 1.0.
    """
    # Fail fast on a bad ratio instead of after all files are processed.
    assert 0.0 <= split_ratio <= 1.0
    ds = []
    for tfp in ds_paths:
        # Filename encodes "<index>_<rating>.txt"; the index is unused.
        _, rating = os.path.basename(tfp).split(".")[0].split("_")
        with open(tfp, "r") as f:
            # Read the whole file: the previous readlines()[0] silently
            # dropped everything after the first line of multi-line reviews.
            raw = bs4.BeautifulSoup(f.read(), "html5lib")
        txt = raw.get_text(separator=' ')
        tokens = concat_contractions(nltk.word_tokenize(txt))
        # Map to vocab ids; out-of-vocabulary words are dropped entirely.
        tokens = [vocab[w] for w in tokens if w in vocab]
        # Truncate or right-pad with 0 (the padding id) to a fixed length.
        tokens = tokens[:max_len] + [0] * max(0, max_len - len(tokens))
        ds.append((tokens, int(rating)))
    if not ds:
        # Guard: zip(*[]) would raise on an empty path list.
        return ((), ()), (None, None)
    dat, labels = zip(*ds)
    if split_ratio == 1.:
        return (dat, labels), (None, None)
    # Shuffle indices, then carve off the first split_ratio fraction for training.
    split_idx = int(len(dat) * split_ratio)
    order = list(range(len(dat)))
    random.shuffle(order)
    tidx, vidx = order[:split_idx], order[split_idx:]
    ts, ts_labels = [dat[i] for i in tidx], [labels[i] for i in tidx]
    vs, vs_labels = [dat[i] for i in vidx], [labels[i] for i in vidx]
    return (ts, ts_labels), (vs, vs_labels)

In [5]:
# Root of the ACL IMDB sentiment dataset.
# NOTE(review): hardcoded absolute path — consider a configurable DATA_DIR.
IMDB_BASEDIR = "/home/david/Programming/data/aclImdb"
%ls $IMDB_BASEDIR
# Gather every review file under {train,test}/{pos,neg}.
# Sorting is lexicographic over full paths, so all "neg" files precede "pos".
train_paths = sorted([f.path for d in ["pos", "neg"] for f in os.scandir(os.path.join(IMDB_BASEDIR, "train", d))])
test_paths = sorted([f.path for d in ["pos", "neg"] for f in os.scandir(os.path.join(IMDB_BASEDIR, "test", d))])

# Peek at the first few paths (rich output of the last expression).
train_paths[:5]


imdbEr.txt  imdb.vocab  README  test/  train/
Out[5]:
['/home/david/Programming/data/aclImdb/train/neg/0_3.txt',
 '/home/david/Programming/data/aclImdb/train/neg/10000_4.txt',
 '/home/david/Programming/data/aclImdb/train/neg/10001_4.txt',
 '/home/david/Programming/data/aclImdb/train/neg/10002_1.txt',
 '/home/david/Programming/data/aclImdb/train/neg/10003_1.txt']

In [6]:
# Keep only the first vocab_limit entries of imdb.vocab as the vocabulary.
vocab_limit = 5000
with open(os.path.join(IMDB_BASEDIR, "imdb.vocab"), "r") as f:
    # Ids start at 1 so that 0 stays free as the padding id used downstream.
    vocab = {w:(i+1) for i, w in enumerate([l.strip() for l in f.readlines()][:vocab_limit])}

In [26]:
# Tokenize all training files; hold out 10% of them for validation.
trainset, validset = data_processing(train_paths, split_ratio = 0.9)
print(len(trainset[0][0]), len(validset[0][0]))
ts, ts_labels = torch.Tensor(trainset[0]).long(), torch.Tensor(trainset[1])
# Binarize labels: a rating above 5 counts as a positive review.
ts_labels = (ts_labels > 5).float()
dts = data.TensorDataset(ts, ts_labels)
dlts = data.DataLoader(dts, batch_size=100)


500 500

In [33]:
# Build the validation tensors/loader the same way as the training set.
vs, vs_labels = torch.Tensor(validset[0]).long(), torch.Tensor(validset[1])
vs_labels = (vs_labels > 5).float()  # same >5 positive binarization as training
dvs = data.TensorDataset(vs, vs_labels)
dlvs = data.DataLoader(dvs, batch_size=100)
print(len(vs))


2500

In [12]:
#split_ratio = 0.9
#split_idx = int(len(trainset) * split_ratio)
#tidx = list(range(len(trainset)))
#random.shuffle(tidx)
#tidx, vidx = tidx[:split_idx], tidx[split_idx:]
#ts, ts_labels = [trainset[tid] for tid in tidx], [train_labels[tid] for tid in tidx]
#vs, vs_labels = [trainset[vid] for vid in vidx], [train_labels[vid] for vid in vidx]

#ts, ts_labels = torch.Tensor(ts).long(), torch.Tensor(ts_labels)
#ts_labels = (ts_labels > 5).float()
#dts = data.TensorDataset(ts, ts_labels)
#dlts = data.DataLoader(dts, batch_size=100)


25000 2

In [60]:
class SingleHiddenNN(nn.Module):
    """Bag-of-embeddings sentiment classifier with one hidden layer.

    Token ids are embedded, the per-review embeddings are flattened into a
    single vector, passed through a SELU-activated hidden layer with
    dropout, and squashed to one sigmoid probability per review.

    Note: despite the attribute name ``relu``, the activation is a SELU.
    ``batch_size`` is accepted but not used by the module.
    """

    def __init__(self, vocab_size, max_len, embed_elems, batch_size):
        super(SingleHiddenNN, self).__init__()
        self.vocab_size = vocab_size
        self.embed_elems = embed_elems
        self.max_len = max_len
        # +1 row so id 0 can serve as the padding token.
        self.emb = nn.Embedding(self.vocab_size + 1, self.embed_elems)
        self.fc = nn.Linear(int(self.max_len * self.embed_elems), 100)
        self.relu = nn.SELU()
        self.dropout = nn.Dropout(0.7)
        self.out = nn.Linear(100, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # (batch, max_len) ids -> (batch, max_len * embed_elems) flat vector.
        embedded = self.emb(input).view(input.size(0), -1)
        hidden = self.dropout(self.relu(self.fc(embedded)))
        probs = self.sigmoid(self.out(hidden))
        # Flatten to (batch,) to match BCELoss targets.
        return probs.view(-1)

# Sanity check: token ids must fit inside the embedding table (0..len(vocab)).
print(ts.min(), ts.max())
print(ts.size())
model = SingleHiddenNN(len(vocab), 500, 32, 100)
print(model)
criterion = nn.BCELoss()
# Two optimizers over the same parameters; training alternates between them
# per epoch (even epochs step Adam, odd epochs step SGD+momentum).
# NOTE(review): each optimizer keeps its own momentum/Adam state for the same
# weights — unusual setup; confirm this alternation is intentional.
optimizer = []
optimizer += [torch.optim.Adam(model.parameters(), lr=0.0001)]
optimizer += [torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)]
epochs = 250
for epoch in range(epochs):
    model.train()  # enable dropout for training
    running_loss = 0
    for i, (mb, tgts) in enumerate(dlts):
        model.zero_grad()
        # NOTE(review): Variable is the pre-0.4 torch API; on modern torch
        # tensors can be used directly.
        mb, tgts = torch.autograd.Variable(mb), torch.autograd.Variable(tgts.float())
        out = model(mb)
        loss = criterion(out, tgts)
        loss.backward()
        # Select which optimizer steps this epoch.
        opt_idx = epoch % 2
        optimizer[opt_idx].step()
        running_loss += loss.data[0]  # NOTE(review): loss.item() on torch >= 0.4
    print("epoch {} had a loss of {:.5}:".format(epoch+1, running_loss))
    # Evaluate on the held-out validation loader every 5 epochs.
    if epoch > 0 and epoch % 5 == 0:
        model.eval()  # disable dropout for evaluation
        correct = 0
        for vmb, vtgts in dlvs:
            vmb, vtgts = torch.autograd.Variable(vmb), torch.autograd.Variable(vtgts.float())
            vout = model(vmb)
            vpred = vout.round()  # threshold sigmoid output at 0.5
            correct += (vpred == vtgts).data.sum()
        print("correct: {}, total: {}".format(correct, len(vs)))
        print("validation accuracy: {:.2f}".format(100.*correct/len(vs)))


0 5000
torch.Size([22500, 500])
SingleHiddenNN (
  (emb): Embedding(5001, 32)
  (fc): Linear (16000 -> 100)
  (relu): SELU
  (dropout): Dropout (p = 0.7)
  (out): Linear (100 -> 1)
  (sigmoid): Sigmoid ()
)
epoch 1 had a loss of 180.21:
epoch 2 had a loss of 149.41:
epoch 3 had a loss of 157.26:
epoch 4 had a loss of 136.89:
epoch 5 had a loss of 146.05:
epoch 6 had a loss of 128.16:
correct: 1506, total: 2500
validation accuracy: 60.24
epoch 7 had a loss of 138.26:
epoch 8 had a loss of 122.53:
epoch 9 had a loss of 131.25:
epoch 10 had a loss of 116.95:
epoch 11 had a loss of 125.69:
correct: 1515, total: 2500
validation accuracy: 60.60
epoch 12 had a loss of 113.37:
epoch 13 had a loss of 120.07:
epoch 14 had a loss of 106.46:
epoch 15 had a loss of 116.02:
epoch 16 had a loss of 102.44:
correct: 1567, total: 2500
validation accuracy: 62.68
epoch 17 had a loss of 111.7:
epoch 18 had a loss of 99.776:
epoch 19 had a loss of 109.25:
epoch 20 had a loss of 96.52:
epoch 21 had a loss of 104.85:
correct: 1563, total: 2500
validation accuracy: 62.52
epoch 22 had a loss of 91.543:
epoch 23 had a loss of 100.93:
epoch 24 had a loss of 89.977:
epoch 25 had a loss of 97.779:
epoch 26 had a loss of 85.79:
correct: 1630, total: 2500
validation accuracy: 65.20
epoch 27 had a loss of 94.657:
epoch 28 had a loss of 82.805:
epoch 29 had a loss of 91.25:
epoch 30 had a loss of 81.997:
epoch 31 had a loss of 90.426:
correct: 1589, total: 2500
validation accuracy: 63.56
epoch 32 had a loss of 78.93:
epoch 33 had a loss of 86.228:
epoch 34 had a loss of 76.29:
epoch 35 had a loss of 83.203:
epoch 36 had a loss of 74.013:
correct: 1689, total: 2500
validation accuracy: 67.56
epoch 37 had a loss of 82.516:
epoch 38 had a loss of 72.502:
epoch 39 had a loss of 79.541:
epoch 40 had a loss of 70.698:
epoch 41 had a loss of 76.423:
correct: 1676, total: 2500
validation accuracy: 67.04
epoch 42 had a loss of 66.121:
epoch 43 had a loss of 74.163:
epoch 44 had a loss of 65.6:
epoch 45 had a loss of 72.702:
epoch 46 had a loss of 63.273:
correct: 1744, total: 2500
validation accuracy: 69.76
epoch 47 had a loss of 70.441:
epoch 48 had a loss of 61.714:
epoch 49 had a loss of 69.674:
epoch 50 had a loss of 60.041:
epoch 51 had a loss of 67.995:
correct: 1672, total: 2500
validation accuracy: 66.88
epoch 52 had a loss of 58.486:
epoch 53 had a loss of 67.985:
epoch 54 had a loss of 56.784:
epoch 55 had a loss of 65.629:
epoch 56 had a loss of 55.778:
correct: 1778, total: 2500
validation accuracy: 71.12
epoch 57 had a loss of 63.425:
epoch 58 had a loss of 54.342:
epoch 59 had a loss of 60.929:
epoch 60 had a loss of 52.447:
epoch 61 had a loss of 59.177:
correct: 1784, total: 2500
validation accuracy: 71.36
epoch 62 had a loss of 50.707:
epoch 63 had a loss of 59.645:
epoch 64 had a loss of 48.444:
epoch 65 had a loss of 58.692:
epoch 66 had a loss of 47.309:
correct: 1796, total: 2500
validation accuracy: 71.84
epoch 67 had a loss of 56.025:
epoch 68 had a loss of 46.815:
epoch 69 had a loss of 57.314:
epoch 70 had a loss of 45.563:
epoch 71 had a loss of 54.548:
correct: 1807, total: 2500
validation accuracy: 72.28
epoch 72 had a loss of 43.947:
epoch 73 had a loss of 51.221:
epoch 74 had a loss of 43.44:
epoch 75 had a loss of 50.027:
epoch 76 had a loss of 41.055:
correct: 1806, total: 2500
validation accuracy: 72.24
epoch 77 had a loss of 49.864:
epoch 78 had a loss of 40.638:
epoch 79 had a loss of 47.843:
epoch 80 had a loss of 40.034:
epoch 81 had a loss of 46.633:
correct: 1817, total: 2500
validation accuracy: 72.68
epoch 82 had a loss of 38.564:
epoch 83 had a loss of 45.391:
epoch 84 had a loss of 38.921:
epoch 85 had a loss of 44.739:
epoch 86 had a loss of 36.371:
correct: 1825, total: 2500
validation accuracy: 73.00
epoch 87 had a loss of 42.843:
epoch 88 had a loss of 36.642:
epoch 89 had a loss of 44.325:
epoch 90 had a loss of 35.034:
epoch 91 had a loss of 43.729:
correct: 1835, total: 2500
validation accuracy: 73.40
epoch 92 had a loss of 34.83:
epoch 93 had a loss of 43.054:
epoch 94 had a loss of 34.262:
epoch 95 had a loss of 42.408:
epoch 96 had a loss of 33.244:
correct: 1844, total: 2500
validation accuracy: 73.76
epoch 97 had a loss of 40.137:
epoch 98 had a loss of 32.036:
epoch 99 had a loss of 40.617:
epoch 100 had a loss of 31.283:
epoch 101 had a loss of 38.893:
correct: 1818, total: 2500
validation accuracy: 72.72
epoch 102 had a loss of 30.514:
epoch 103 had a loss of 36.414:
epoch 104 had a loss of 29.846:
epoch 105 had a loss of 35.484:
epoch 106 had a loss of 28.688:
correct: 1844, total: 2500
validation accuracy: 73.76
epoch 107 had a loss of 32.477:
epoch 108 had a loss of 27.244:
epoch 109 had a loss of 33.256:
epoch 110 had a loss of 26.222:
epoch 111 had a loss of 31.101:
correct: 1846, total: 2500
validation accuracy: 73.84
epoch 112 had a loss of 26.721:
epoch 113 had a loss of 30.268:
epoch 114 had a loss of 25.618:
epoch 115 had a loss of 28.931:
epoch 116 had a loss of 25.284:
correct: 1859, total: 2500
validation accuracy: 74.36
epoch 117 had a loss of 28.243:
epoch 118 had a loss of 24.153:
epoch 119 had a loss of 28.069:
epoch 120 had a loss of 23.269:
epoch 121 had a loss of 27.448:
correct: 1866, total: 2500
validation accuracy: 74.64
epoch 122 had a loss of 22.092:
epoch 123 had a loss of 25.823:
epoch 124 had a loss of 22.316:
epoch 125 had a loss of 26.363:
epoch 126 had a loss of 22.562:
correct: 1869, total: 2500
validation accuracy: 74.76
epoch 127 had a loss of 25.361:
epoch 128 had a loss of 21.893:
epoch 129 had a loss of 23.67:
epoch 130 had a loss of 20.472:
epoch 131 had a loss of 24.869:
correct: 1872, total: 2500
validation accuracy: 74.88
epoch 132 had a loss of 20.084:
epoch 133 had a loss of 23.902:
epoch 134 had a loss of 20.17:
epoch 135 had a loss of 24.072:
epoch 136 had a loss of 19.705:
correct: 1873, total: 2500
validation accuracy: 74.92
epoch 137 had a loss of 23.764:
epoch 138 had a loss of 20.042:
epoch 139 had a loss of 24.471:
epoch 140 had a loss of 19.268:
epoch 141 had a loss of 26.174:
correct: 1865, total: 2500
validation accuracy: 74.60
epoch 142 had a loss of 19.303:
epoch 143 had a loss of 26.392:
epoch 144 had a loss of 20.878:
epoch 145 had a loss of 29.359:
epoch 146 had a loss of 19.566:
correct: 1882, total: 2500
validation accuracy: 75.28
epoch 147 had a loss of 29.096:
epoch 148 had a loss of 17.609:
epoch 149 had a loss of 28.129:
epoch 150 had a loss of 16.877:
epoch 151 had a loss of 25.728:
correct: 1887, total: 2500
validation accuracy: 75.48
epoch 152 had a loss of 16.436:
epoch 153 had a loss of 24.018:
epoch 154 had a loss of 15.716:
epoch 155 had a loss of 20.792:
epoch 156 had a loss of 14.76:
correct: 1888, total: 2500
validation accuracy: 75.52
epoch 157 had a loss of 19.901:
epoch 158 had a loss of 14.685:
epoch 159 had a loss of 20.57:
epoch 160 had a loss of 14.596:
epoch 161 had a loss of 20.118:
correct: 1909, total: 2500
validation accuracy: 76.36
epoch 162 had a loss of 14.506:
epoch 163 had a loss of 18.156:
epoch 164 had a loss of 14.234:
epoch 165 had a loss of 18.269:
epoch 166 had a loss of 14.057:
correct: 1897, total: 2500
validation accuracy: 75.88
epoch 167 had a loss of 16.399:
epoch 168 had a loss of 13.191:
epoch 169 had a loss of 16.284:
epoch 170 had a loss of 12.713:
epoch 171 had a loss of 14.768:
correct: 1897, total: 2500
validation accuracy: 75.88
epoch 172 had a loss of 12.85:
epoch 173 had a loss of 14.618:
epoch 174 had a loss of 12.6:
epoch 175 had a loss of 13.754:
epoch 176 had a loss of 12.1:
correct: 1896, total: 2500
validation accuracy: 75.84
epoch 177 had a loss of 14.189:
epoch 178 had a loss of 12.096:
epoch 179 had a loss of 13.668:
epoch 180 had a loss of 13.047:
epoch 181 had a loss of 14.236:
correct: 1884, total: 2500
validation accuracy: 75.36
epoch 182 had a loss of 12.84:
epoch 183 had a loss of 12.927:
epoch 184 had a loss of 12.742:
epoch 185 had a loss of 12.622:
epoch 186 had a loss of 13.217:
correct: 1904, total: 2500
validation accuracy: 76.16
epoch 187 had a loss of 13.594:
epoch 188 had a loss of 12.24:
epoch 189 had a loss of 12.772:
epoch 190 had a loss of 12.673:
epoch 191 had a loss of 12.067:
correct: 1835, total: 2500
validation accuracy: 73.40
epoch 192 had a loss of 13.577:
epoch 193 had a loss of 13.533:
epoch 194 had a loss of 13.713:
epoch 195 had a loss of 12.667:
epoch 196 had a loss of 11.576:
correct: 1915, total: 2500
validation accuracy: 76.60
epoch 197 had a loss of 12.638:
epoch 198 had a loss of 10.731:
epoch 199 had a loss of 12.506:
epoch 200 had a loss of 10.383:
epoch 201 had a loss of 12.581:
correct: 1844, total: 2500
validation accuracy: 73.76
epoch 202 had a loss of 12.099:
epoch 203 had a loss of 13.259:
epoch 204 had a loss of 10.19:
epoch 205 had a loss of 14.359:
epoch 206 had a loss of 10.284:
correct: 1917, total: 2500
validation accuracy: 76.68
epoch 207 had a loss of 14.6:
epoch 208 had a loss of 10.931:
epoch 209 had a loss of 12.596:
epoch 210 had a loss of 10.348:
epoch 211 had a loss of 11.99:
correct: 1856, total: 2500
validation accuracy: 74.24
epoch 212 had a loss of 11.241:
epoch 213 had a loss of 11.915:
epoch 214 had a loss of 11.312:
epoch 215 had a loss of 10.298:
epoch 216 had a loss of 10.769:
correct: 1921, total: 2500
validation accuracy: 76.84
epoch 217 had a loss of 9.7846:
epoch 218 had a loss of 10.23:
epoch 219 had a loss of 8.9255:
epoch 220 had a loss of 9.081:
epoch 221 had a loss of 8.568:
correct: 1853, total: 2500
validation accuracy: 74.12
epoch 222 had a loss of 8.9211:
epoch 223 had a loss of 8.0079:
epoch 224 had a loss of 9.1798:
epoch 225 had a loss of 7.9771:
epoch 226 had a loss of 8.3975:
correct: 1921, total: 2500
validation accuracy: 76.84
epoch 227 had a loss of 9.0924:
epoch 228 had a loss of 8.2583:
epoch 229 had a loss of 8.2822:
epoch 230 had a loss of 7.976:
epoch 231 had a loss of 9.0271:
correct: 1854, total: 2500
validation accuracy: 74.16
epoch 232 had a loss of 8.3784:
epoch 233 had a loss of 8.3691:
epoch 234 had a loss of 8.1224:
epoch 235 had a loss of 8.0464:
epoch 236 had a loss of 7.1277:
correct: 1926, total: 2500
validation accuracy: 77.04
epoch 237 had a loss of 8.9603:
epoch 238 had a loss of 8.8157:
epoch 239 had a loss of 9.0398:
epoch 240 had a loss of 6.0725:
epoch 241 had a loss of 9.0863:
correct: 1908, total: 2500
validation accuracy: 76.32
epoch 242 had a loss of 6.1797:
epoch 243 had a loss of 8.3911:
epoch 244 had a loss of 6.0233:
epoch 245 had a loss of 7.594:
epoch 246 had a loss of 5.2482:
correct: 1922, total: 2500
validation accuracy: 76.88
epoch 247 had a loss of 8.3338:
epoch 248 had a loss of 6.1232:
epoch 249 had a loss of 8.2647:
epoch 250 had a loss of 6.3322:

In [56]:
# Final accuracy pass over the validation loader.
correct = 0
for i, (mb, tgts) in enumerate(dlvs):
    mb, tgts = torch.autograd.Variable(mb), torch.autograd.Variable(tgts.float())
    out = model(mb)
    pred = out.round()  # threshold probability at 0.5
    correct += (pred == tgts).data.sum()
print(correct, correct / len(vs), len(vs))
# NOTE(review): pred/tgts here hold only the *last* batch from the loop above.
print(torch.stack((pred.data, tgts.data), 1))


2019 0.8076 2500

    0     0
    1     1
    0     0
    0     1
    1     1
    0     0
    0     0
    1     0
    0     0
    1     1
    0     0
    1     0
    1     1
    0     0
    1     0
    0     0
    0     0
    1     0
    0     0
    1     0
    0     0
    1     1
    0     1
    1     1
    0     0
    0     0
    1     1
    0     1
    1     1
    1     1
    1     0
    1     1
    0     0
    0     0
    0     0
    0     0
    0     1
    0     1
    1     1
    1     0
    0     0
    1     1
    1     0
    1     1
    0     1
    0     0
    1     1
    0     1
    0     0
    0     0
    0     0
    1     1
    0     0
    1     1
    0     0
    0     0
    0     1
    1     1
    1     1
    1     1
    1     0
    0     0
    1     1
    0     0
    0     0
    0     0
    0     0
    1     0
    1     1
    0     0
    0     0
    0     0
    1     1
    1     1
    1     1
    1     1
    0     1
    0     0
    1     1
    0     1
    0     0
    0     0
    1     1
    1     1
    0     0
    1     1
    1     1
    0     0
    1     1
    0     0
    1     0
    0     0
    1     1
    1     1
    0     0
    0     0
    0     0
    0     0
    0     0
    1     1
[torch.FloatTensor of size 100x2]


In [53]:
# Persist only the learned parameters (state_dict), not the whole module.
torch.save(model.state_dict(), "model_imdb_20170912.pt")

## torchtext (exploratory alternative for loading the IMDB dataset)


In [ ]:
# Exploratory: load the same IMDB data via torchtext instead of manual parsing.
import torchtext
import torchtext.data as ttdata
TEXT = ttdata.Field()
LABEL = ttdata.Field(sequential=False)  # labels are single values, not token sequences
imdb_ds = torchtext.datasets.IMDB("/home/david/imdb_sentiment/data", TEXT, LABEL)
train_iter, test_iter = imdb_ds.iters(batch_size=4, device=-1)  # device=-1 keeps batches on CPU

In [ ]:
# Rebuild the iterators with a larger batch size and inspect a single batch.
train_iter, test_iter = imdb_ds.iters(batch_size=25, device=-1)
for x in train_iter:
    print(x.text, x.label.size())
    break

In [52]:
%ls


aae_supervised.py            notes.md
algore/                      numpy_reshape_test.ipynb
AlGore_2009.sph              pad_test.py
AlGore_2009.stm              pcsnpny-20150204-mkj/
audio_rnn_basic.ipynb        pcsnpny-20150204-mkj.tgz
clipmin.png                  piano2.mp3
CNN2RNN.ipynb                piano.mp3
collate_variable.py          piano_new.wav
data/                        playground.ipynb
data.zip                     predict_audio.ipynb
deepspeech1d.ipynb           Presentation.ipynb
denoising_autoencoder.ipynb  prime_factors.py
extract_mnist.py             pyaudio-test.py
file2.wav                    __pycache__/
file.flac                    pytorch_basics.ipynb
file.mp3                     PyTorch Embeddings Test.ipynb
file.wav                     pytorch_tutorial_classify_names.ipynb
francemusique/               rnn_autoencoder.ipynb
G729VAD.ipynb                rnn_beispiel1.png
GRUAutoencoder.ipynb         rnn_beispiel2.png
hallöchen.wav                rnn_predictions_final.png
helloworld.py*               rnn_predictions.png
imdb_tokenize.ipynb          smoother1.png
kmeans1.png                  snipsdata/
kmeans2.png                  Snips_Report.ipynb
label_smoothing.ipynb        starters/
levinson.py                  startup.sh
librispeech_load_txts.ipynb  stm_sph_processing.ipynb
librispeech.py               tedlium1.png
loader.py                    tedlium2.png
mini_librispeech_dataset/    tedlium_labels.ipynb
mlp_beispiel_noise1.png      test_argparse.py
mlp_beispiel_noise2.png      test_nltk.py
mlp_beispiel_nonoise1.png    test_spacy.py
mlp_beispiel_nonoise2.png    timeseries.ipynb
mlp_predictions_noise.png    torchaudio
mlp_predictions.png          torchaudio_vs_librosa.ipynb
MNIST/                       VAD_labeling.ipynb
model_20170912.pt            wavelet.ipynb
models/                      yesno_torchaudio_playground.ipynb
mu_law_companding.ipynb      zero2d_test.py