In [1]:
import torch 
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable  # no-op wrapper on PyTorch >= 0.4; kept for older versions

In [2]:
num_epochs = 5
batch_size = 100
learning_rate = 0.001

In [20]:
train_dataset = dsets.MNIST(root='./data/',
                            train=True, 
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data/',
                           train=False, 
                           transform=transforms.ToTensor())

In [21]:
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, 
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size, 
                                          shuffle=False)
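
A quick sanity check (a sketch, not part of the original run): one batch drawn from train_loader should contain batch_size grayscale images of 28x28 pixels.

In [ ]:
# Peek at a single batch; assumes the MNIST download above succeeded.
images, labels = next(iter(train_loader))
print(images.size())   # torch.Size([100, 1, 28, 28])
print(labels.size())   # torch.Size([100])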

In [61]:
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # 28x28x1 -> 14x14x16: 5x5 conv (padding=2 keeps 28x28), then 2x2 max pool halves it
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # 14x14x16 -> 7x7x32
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7 * 7 * 32, 10)  # 7*7*32 = 1568 flattened features -> 10 classes
    
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten to (batch, 1568)
        out = self.fc(out)
        return out

In [62]:
cnn = CNN()

In [63]:
cnn


Out[63]:
CNN (
  (layer1): Sequential (
    (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True)
    (2): ReLU ()
    (3): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
  )
  (layer2): Sequential (
    (0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
    (2): ReLU ()
    (3): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
  )
  (fc): Linear (1568 -> 10)
)
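
A sketch (not part of the original notebook) tracing a dummy input through the two layers confirms the 7 * 7 * 32 = 1568 features that the Linear (1568 -> 10) above expects:

In [ ]:
# Trace the shapes with a dummy batch of one image.
dummy = Variable(torch.zeros(1, 1, 28, 28))
out = cnn.layer1(dummy)                   # [1, 16, 14, 14]
out = cnn.layer2(out)                     # [1, 32, 7, 7]
print(out.view(out.size(0), -1).size())   # torch.Size([1, 1568])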

In [64]:
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
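
nn.CrossEntropyLoss applies LogSoftmax followed by NLLLoss internally, which is why forward() returns raw logits and the model has no explicit softmax layer. An illustrative sketch of the equivalence (the random logits and targets below are made up for the example):

In [ ]:
import torch.nn.functional as F
logits = Variable(torch.randn(4, 10))
targets = Variable(torch.LongTensor([1, 0, 9, 3]))
print(criterion(logits, targets))
print(F.nll_loss(F.log_softmax(logits, dim=1), targets))  # same value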

In [65]:
# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images)
        labels = Variable(labels)
        
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        if (i+1) % 100 == 0:
            # loss.item() extracts the scalar value (replaces loss.data[0] from pre-0.4 PyTorch)
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset) // batch_size, loss.item()))


Epoch [1/5], Iter [100/600] Loss: 0.1221
Epoch [1/5], Iter [200/600] Loss: 0.1609
Epoch [1/5], Iter [300/600] Loss: 0.0711
Epoch [1/5], Iter [400/600] Loss: 0.0576
Epoch [1/5], Iter [500/600] Loss: 0.0682
Epoch [1/5], Iter [600/600] Loss: 0.0216
Epoch [2/5], Iter [100/600] Loss: 0.0442
Epoch [2/5], Iter [200/600] Loss: 0.0699
Epoch [2/5], Iter [300/600] Loss: 0.0435
Epoch [2/5], Iter [400/600] Loss: 0.0309
Epoch [2/5], Iter [500/600] Loss: 0.0633
Epoch [2/5], Iter [600/600] Loss: 0.0760
Epoch [3/5], Iter [100/600] Loss: 0.0219
Epoch [3/5], Iter [200/600] Loss: 0.0276
Epoch [3/5], Iter [300/600] Loss: 0.0570
Epoch [3/5], Iter [400/600] Loss: 0.1438
Epoch [3/5], Iter [500/600] Loss: 0.0298
Epoch [3/5], Iter [600/600] Loss: 0.0261
Epoch [4/5], Iter [100/600] Loss: 0.0108
Epoch [4/5], Iter [200/600] Loss: 0.0115
Epoch [4/5], Iter [300/600] Loss: 0.0218
Epoch [4/5], Iter [400/600] Loss: 0.0225
Epoch [4/5], Iter [500/600] Loss: 0.0304
Epoch [4/5], Iter [600/600] Loss: 0.0027
Epoch [5/5], Iter [100/600] Loss: 0.0372
Epoch [5/5], Iter [200/600] Loss: 0.0460
Epoch [5/5], Iter [300/600] Loss: 0.0281
Epoch [5/5], Iter [400/600] Loss: 0.0474
Epoch [5/5], Iter [500/600] Loss: 0.0650
Epoch [5/5], Iter [600/600] Loss: 0.0251

In [66]:
# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images)
    outputs = cnn(images)
    _, predicted = torch.max(outputs.data, 1)  # index of the largest logit = predicted class
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))

# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl')


Test Accuracy of the model on the 10000 test images: 98 %
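
The saved state_dict can later be restored into a fresh CNN instance; a minimal sketch, assuming cnn.pkl is in the working directory:

In [ ]:
model = CNN()
model.load_state_dict(torch.load('cnn.pkl'))
model.eval()  # BatchNorm switches to its moving statistics for inference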

In [ ]: