In [1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchaudio

In [13]:
# input: the integers 0..37 as a (batch=1, channels=1, length=38) signal
X = torch.arange(0, 38).float().view(1, 1, -1)
X = Variable(X)  # pre-0.4 autograd wrapper, consistent with the output below
ksize = 3

# four dilated 3-tap convolutions (dilation 1..4) plus matching constant pads:
# padding `dilation` zeros on each side keeps every output at length 38,
# since the effective kernel span is dilation * (ksize - 1) + 1
conv1d1_3dN = []
pad1dN = []
for i in range(4):
    conv1d1_3dN += [nn.Conv1d(in_channels=1, out_channels=1, stride=1, padding=0,
                              kernel_size=ksize, dilation=(i + 1), bias=False)]
    pad1dN += [nn.ConstantPad1d(((i + 1), (i + 1)), 0)]

# chain the stages: each one averages 3 dilated neighbours of the previous
# stage's output, then truncates toward zero via .long().float()
outs = [X]
for i, (conv, pad) in enumerate(zip(conv1d1_3dN, pad1dN)):
    conv.weight.data.fill_(1.0 / ksize)  # moving-average kernel
    model = nn.Sequential(pad, conv)
    # model = conv  # variant without padding (output would shrink)
    out = model(outs[i])
    outs.append(out.long().float())
outs


Out[13]:
[Variable containing:
 (0 ,.,.) = 
 
 Columns 0 to 18 
     0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
 
 Columns 19 to 37 
    19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36  37
 [torch.FloatTensor of size 1x1x38], Variable containing:
 (0 ,.,.) = 
 
 Columns 0 to 18 
     0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
 
 Columns 19 to 37 
    19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36  24
 [torch.FloatTensor of size 1x1x38], Variable containing:
 (0 ,.,.) = 
 
 Columns 0 to 18 
     0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
 
 Columns 19 to 37 
    19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  30  23  19
 [torch.FloatTensor of size 1x1x38], Variable containing:
 (0 ,.,.) = 
 
 Columns 0 to 18 
     1   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
 
 Columns 19 to 37 
    19  20  21  22  23  24  25  26  27  28  29  30  31  30  28  28  20  18  17
 [torch.FloatTensor of size 1x1x38], Variable containing:
 (0 ,.,.) = 
 
 Columns 0 to 18 
     1   2   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
 
 Columns 19 to 37 
    19  20  21  22  23  24  25  26  27  27  27  28  26  25  24  19  17  16  15
 [torch.FloatTensor of size 1x1x38]]
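
The padding of `dilation` samples per side is what keeps every stage at length 38. As a quick sanity check (a minimal sketch, not executed in the run above), the output-length formula from the Conv1d documentation can be evaluated for each dilation used here:

In [ ]:
# L_out = (L_in + 2*pad - dilation*(ksize - 1) - 1) // stride + 1
# With pad = dilation and ksize = 3, this always returns L_in.
L_in, ksize, stride = 38, 3, 1
for d in range(1, 5):
    pad = d
    L_out = (L_in + 2 * pad - d * (ksize - 1) - 1) // stride + 1
    assert L_out == L_in, (d, L_out)
print("all four dilations preserve the length of", L_in)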

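The separate nn.ConstantPad1d modules are not strictly needed: Conv1d can zero-pad internally through its padding argument. A minimal equivalent sketch (names such as conv_same and Y are illustrative, not from the run above):

In [ ]:
# same moving-average chain, using Conv1d's own zero padding (padding=dilation)
Y = Variable(torch.arange(0, 38).float().view(1, 1, -1))
for d in range(1, 5):
    conv_same = nn.Conv1d(1, 1, kernel_size=3, stride=1,
                          padding=d, dilation=d, bias=False)
    conv_same.weight.data.fill_(1.0 / 3)
    Y = conv_same(Y).long().float()  # same truncation as above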