In [2]:
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable

In [4]:
# Hyperparameters for the MNIST multilayer perceptron.
input_size = 784       # 28 * 28 pixels, flattened per image
hidden_size = 500      # units in the single hidden layer
num_classes = 10       # digits 0-9
num_epochs = 5         # full passes over the training set
batch_size = 100       # samples per gradient step
learning_rate = 0.001  # Adam step size

In [5]:
# Training split: downloaded to ./data if missing; images become [0, 1] tensors.
train_dataset = dsets.MNIST(root='./data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

# Test split: no download flag, so the files are assumed to exist under ./data.
test_dataset = dsets.MNIST(root='./data',
                           train=False,
                           transform=transforms.ToTensor())

In [7]:
# Exploratory: show the dataset object's repr.
train_dataset


Out[7]:
<torchvision.datasets.mnist.MNIST at 0x10b5e7908>

In [8]:
# Exploratory: show the test dataset object's repr.
test_dataset


Out[8]:
<torchvision.datasets.mnist.MNIST at 0x10b5e7978>

In [9]:
# Number of training examples (60000 for MNIST).
len(train_dataset)


Out[9]:
60000

In [10]:
# Exploratory: list the dataset's attributes and methods.
dir(train_dataset)


Out[10]:
['__class__',
 '__delattr__',
 '__dict__',
 '__dir__',
 '__doc__',
 '__eq__',
 '__format__',
 '__ge__',
 '__getattribute__',
 '__getitem__',
 '__gt__',
 '__hash__',
 '__init__',
 '__le__',
 '__len__',
 '__lt__',
 '__module__',
 '__ne__',
 '__new__',
 '__reduce__',
 '__reduce_ex__',
 '__repr__',
 '__setattr__',
 '__sizeof__',
 '__str__',
 '__subclasshook__',
 '__weakref__',
 '_check_exists',
 'download',
 'processed_folder',
 'raw_folder',
 'root',
 'target_transform',
 'test_file',
 'train',
 'train_data',
 'train_labels',
 'training_file',
 'transform',
 'urls']

In [20]:
# Size of the first raw image tensor (28 x 28, before ToTensor is applied).
# NOTE(review): `train_data` is deprecated in newer torchvision in favor of
# `.data` — confirm the installed version before modernizing this line.
train_dataset.train_data[0].size()


Out[20]:
torch.Size([28, 28])

In [21]:
# Label of the first training example.
# NOTE(review): `train_labels` is deprecated in newer torchvision in favor of
# `.targets` — confirm the installed version before modernizing this line.
train_dataset.train_labels[0]


Out[21]:
5

In [22]:
# Batched iterators over the datasets. Training data is reshuffled every
# epoch; the test order stays fixed so evaluation is deterministic.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

In [23]:
class Net(nn.Module):
    """Two-layer fully connected classifier: Linear -> ReLU -> Linear.

    Takes a (batch, input_size) tensor and returns raw class scores
    (logits) of shape (batch, num_classes); no softmax is applied here,
    as CrossEntropyLoss expects unnormalized scores.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        # Submodule names (fc1/relu/fc2) are kept stable so saved
        # state_dicts remain loadable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Compute logits for a batch of flattened images."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)

In [24]:
# Instantiate the MLP; weights start randomly initialized.
net = Net(input_size, hidden_size, num_classes)

In [25]:
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

In [26]:
# Training loop: standard epochs-over-minibatches SGD with Adam.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 1x28x28 image into a 784-dim row vector.
        # (The deprecated Variable wrapper is unnecessary since PyTorch 0.4;
        # tensors carry autograd state directly.)
        images = images.view(-1, 28 * 28)

        optimizer.zero_grad()              # clear gradients from the previous step
        outputs = net(images)              # forward pass: raw class scores
        loss = criterion(outputs, labels)
        loss.backward()                    # backpropagate
        optimizer.step()                   # apply the parameter update

        if (i + 1) % 100 == 0:
            # loss.item() replaces the deprecated loss.data[0], which raises
            # IndexError on 0-dim loss tensors in modern PyTorch.
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))


Epoch [1/5], Step [100/600], Loss: 0.3636
Epoch [1/5], Step [200/600], Loss: 0.3453
Epoch [1/5], Step [300/600], Loss: 0.1780
Epoch [1/5], Step [400/600], Loss: 0.2170
Epoch [1/5], Step [500/600], Loss: 0.1681
Epoch [1/5], Step [600/600], Loss: 0.1237
Epoch [2/5], Step [100/600], Loss: 0.0699
Epoch [2/5], Step [200/600], Loss: 0.1338
Epoch [2/5], Step [300/600], Loss: 0.1383
Epoch [2/5], Step [400/600], Loss: 0.1322
Epoch [2/5], Step [500/600], Loss: 0.0733
Epoch [2/5], Step [600/600], Loss: 0.0421
Epoch [3/5], Step [100/600], Loss: 0.1070
Epoch [3/5], Step [200/600], Loss: 0.0854
Epoch [3/5], Step [300/600], Loss: 0.0776
Epoch [3/5], Step [400/600], Loss: 0.0578
Epoch [3/5], Step [500/600], Loss: 0.0900
Epoch [3/5], Step [600/600], Loss: 0.0460
Epoch [4/5], Step [100/600], Loss: 0.0425
Epoch [4/5], Step [200/600], Loss: 0.0399
Epoch [4/5], Step [300/600], Loss: 0.0273
Epoch [4/5], Step [400/600], Loss: 0.0154
Epoch [4/5], Step [500/600], Loss: 0.0253
Epoch [4/5], Step [600/600], Loss: 0.0391
Epoch [5/5], Step [100/600], Loss: 0.0628
Epoch [5/5], Step [200/600], Loss: 0.0733
Epoch [5/5], Step [300/600], Loss: 0.0391
Epoch [5/5], Step [400/600], Loss: 0.0150
Epoch [5/5], Step [500/600], Loss: 0.0140
Epoch [5/5], Step [600/600], Loss: 0.0701

In [27]:
# Evaluate on the test set. no_grad() disables gradient bookkeeping, which
# is wasted work (and memory) during pure inference.
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        images = images.view(-1, 28 * 28)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)  # index of max logit = predicted class
        total += labels.size(0)
        # .item() keeps `correct` a plain Python int; without it the sum is a
        # 0-dim tensor in modern PyTorch, which leaks into later arithmetic.
        correct += (predicted == labels).sum().item()

In [28]:
# Report overall test accuracy as a truncated integer percentage.
accuracy = 100 * correct / total
print('Accuracy of the network on the 10000 test images: %d %%' % accuracy)


Accuracy of the network on the 10000 test images: 97 %

In [29]:
# Persist only the learned parameters (state_dict), not the full module object.
torch.save(net.state_dict(), 'model.pkl')

In [30]:
# Parameter tensors saved above: weight and bias for each Linear layer.
net.state_dict().keys()


Out[30]:
odict_keys(['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias'])

In [33]:
# fc1 weight matrix has shape (hidden_size, input_size) = (500, 784).
net.state_dict()['fc1.weight'].size()


Out[33]:
torch.Size([500, 784])

In [ ]: