In [1]:
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

In [2]:
m = nn.Conv2d(1, 6, 5)  # 1 input channel, 6 output channels, 5x5 kernel

In [3]:
m


Out[3]:
Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))

In [32]:
input = Variable(torch.randn(1, 1, 32, 32))  # (batch, channels, height, width) = (1, 1, 32, 32)

In [34]:
output = m(input)

In [35]:
output.size()


Out[35]:
torch.Size([1, 6, 28, 28])

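A quick check on the spatial size: with stride 1 and no padding, a 5x5 kernel trims 4 pixels off each dimension, so 32 - 5 + 1 = 28. The cell below is a minimal sketch (not part of the original run) showing that padding=2 would preserve the 32x32 size:

In [ ]:
# sketch: padding=2 keeps the spatial size at 32x32 for a 5x5 kernel
m_pad = nn.Conv2d(1, 6, 5, padding=2)
m_pad(Variable(torch.randn(1, 1, 32, 32))).size()  # expected: torch.Size([1, 6, 32, 32])
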
In [36]:
m1 = nn.Linear(20, 30)  # fully connected layer: 20 input features -> 30 output features

In [37]:
m1


Out[37]:
Linear (20 -> 30)

In [38]:
input1 = Variable(torch.randn(128, 20))  # batch of 128 samples, 20 features each

In [40]:
output1 = m1(input1)

In [41]:
output1.size()


Out[41]:
torch.Size([128, 30])

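nn.Linear(20, 30) stores a 30x20 weight matrix and a 30-element bias and computes y = x W^T + b, which is why a 128x20 input comes out as 128x30. A minimal sketch (not part of the original run) checking the parameter shapes:

In [ ]:
# sketch: weight has shape (out_features, in_features), bias has shape (out_features,)
m1.weight.size(), m1.bias.size()  # expected: (torch.Size([30, 20]), torch.Size([30]))
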
In [49]:
x = Variable(torch.randn(2, 2, 2))  # random 2x2x2 tensor (8 elements in total)

In [50]:
x  # inspect the tensor before reshaping


Out[50]:
Variable containing:
(0 ,.,.) = 
 -0.5078  0.3558
  1.3433 -1.9786

(1 ,.,.) = 
  0.0182  0.6772
 -0.1476 -0.2288
[torch.FloatTensor of size 2x2x2]

In [52]:
x.view(1, 8)  # flatten to a single 1x8 row; view shares the underlying data


Out[52]:
Variable containing:
-0.5078  0.3558  1.3433 -1.9786  0.0182  0.6772 -0.1476 -0.2288
[torch.FloatTensor of size 1x8]

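view only re-indexes the same eight elements, so the requested shape must match the total element count; passing -1 lets PyTorch infer one dimension. A minimal sketch (not part of the original run):

In [ ]:
# sketch: -1 infers the missing dimension (8 here); 4x2 also holds 8 elements
x.view(-1).size(), x.view(4, 2).size()  # expected: (torch.Size([8]), torch.Size([4, 2]))
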
In [55]:
torch.arange(1, 11, 2)  # start=1, end=11 (exclusive), step=2


Out[55]:
 1
 3
 5
 7
 9
[torch.FloatTensor of size 5]

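torch.arange stops before the end value, so 11 itself never appears. For comparison, torch.linspace includes both endpoints and takes a count instead of a step; the cell below is a minimal sketch, not part of the original run:

In [ ]:
# sketch: linspace(1, 9, 5) yields 1, 3, 5, 7, 9 -- the same values as arange(1, 11, 2)
torch.linspace(1, 9, 5)
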
In [57]:
import torchvision

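torchvision bundles common datasets, model architectures, and image transforms. The cell below is a minimal sketch of typical usage; the root path, batch size, and choice of MNIST are assumptions, not part of the original notebook:

In [ ]:
# sketch (assumed parameters): load MNIST as tensors and wrap it in a DataLoader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True,
                                      transform=transforms.ToTensor())
trainloader = DataLoader(trainset, batch_size=4, shuffle=True)
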