In [ ]:
import re
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
#import torchvision
from torchvision.utils import make_grid
from PIL import Image
#from skimage import io #, transform
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
# import torch.utils.trainer as trainer
# import torch.utils.trainer.plugins
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import numpy as np
import pandas as pd
import os
# from torchsample.modules import ModuleTrainer
# from torchsample.metrics import CategoricalAccuracy
%matplotlib inline
In [ ]:
# Set up data paths and training options
path = "data/dogscats_redux/"
# path = "data/dogscats_redux/sample"
use_cuda = torch.cuda.is_available()
batch_size = 64
print('Using CUDA:', use_cuda)
traindir = os.path.join(path, 'train')
validdir = os.path.join(path, 'valid')
testdir = os.path.join(path, 'test')
In [ ]:
test_files = [filename for filename in os.listdir(testdir) if filename.endswith('.jpg')]
In [ ]:
example_image_path = os.path.join(testdir, test_files[3])
example_image = Image.open(example_image_path)
In [ ]:
def show_image(image):
    plt.figure()
    plt.imshow(image)
    plt.show()
In [ ]:
show_image(example_image)
In [ ]:
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
comp_tsfm = transforms.Compose([
    transforms.Lambda(lambda img: img.resize((224, 224))),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std)])
In [ ]:
tsfm_torch = comp_tsfm(example_image)
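In [ ]:
# Optional sanity check (not required for the pipeline): the composed transform should
# yield a 3x224x224 float tensor, with normalized values spanning a few standard
# deviations around zero.
print(tsfm_torch.size(), tsfm_torch.min(), tsfm_torch.max())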
In [ ]:
class UnNormalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Normalized tensor image of size (C, H, W).
        Returns:
            Tensor: Un-normalized image.
        """
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)  # reverses the normalize step t.sub_(m).div_(s)
        return tensor
In [ ]:
rvrs_tsfm = transforms.Compose([
    UnNormalize(mean=mean, std=std),
    transforms.ToPILImage()
])
In [ ]:
rvrs_tsfm(comp_tsfm(example_image))
In [ ]:
def get_dataset(dirname, tsfm=None):
    return datasets.ImageFolder(dirname, transform=tsfm)

def get_loader(dataset, use_cuda, batch_size=64, shuffle=True):
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, pin_memory=use_cuda)
In [ ]:
train_dataset = get_dataset(traindir, tsfm=comp_tsfm)
train_loader = get_loader(train_dataset, use_cuda)
In [ ]:
classes = train_dataset.classes
n_classes = len(classes)
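In [ ]:
# Optional sanity check: confirm the folder-to-index mapping ImageFolder inferred
# and how many training images it found.
print('class_to_idx:', train_dataset.class_to_idx)
print('training images:', len(train_dataset))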
In [ ]:
# Load the pretrained VGG16
vgg16 = models.vgg16(pretrained=True)
# Finetune by freezing all pretrained parameters and replacing the classifier head
for param in vgg16.parameters():
    param.requires_grad = False
print('Using {:d} classes: {}'.format(n_classes, classes))
In [ ]:
vgg16.classifier = nn.Sequential(
    nn.Linear(512 * 7 * 7, 4096),
    nn.ReLU(),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(),
    nn.Linear(4096, n_classes),
)
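In [ ]:
# Optional: inspect the replacement classifier head. The new Linear layers are created
# with requires_grad=True, so they are the only parameters left trainable.
print(vgg16.classifier)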
In [ ]:
# Monkey patch parameters() to return trainable weights only
import types

def parameters(self):
    return filter(lambda p: p.requires_grad, nn.Module.parameters(self))

vgg16.parameters = types.MethodType(parameters, vgg16)
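In [ ]:
# Optional sanity check: count trainable vs. total parameters to confirm that only
# the new classifier head will be updated by the optimizer.
n_trainable = sum(p.data.numel() for p in vgg16.parameters())
n_total = sum(p.data.numel() for p in nn.Module.parameters(vgg16))
print('trainable parameters: {} / {}'.format(n_trainable, n_total))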
In [ ]:
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(vgg16.parameters(), lr=0.001, weight_decay=.001)
In [ ]:
# enable CUDA if available
if use_cuda:
    vgg16.cuda()
    criterion.cuda()
In [ ]:
for epoch in range(60):
    running_loss = 0.0
    for i, data in enumerate(train_loader):
        # get inputs and labels
        inputs, labels = data
        # move to GPU if available, then wrap in Variable
        if use_cuda:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        # zero the gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = vgg16(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.data[0]
        if i % 5 == 0:  # print accumulated loss every 5 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss))
            running_loss = 0.0
print('Finished Training')
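In [ ]:
# Optional: persist the fine-tuned weights so the model can be reloaded without
# retraining. The file name is arbitrary.
torch.save(vgg16.state_dict(), os.path.join(path, 'vgg16_finetuned.pth'))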
In [ ]:
valid_dataset = get_dataset(validdir, tsfm=comp_tsfm)
valid_loader = get_loader(valid_dataset, use_cuda, batch_size=4)
valid_iter = iter(valid_loader)
In [ ]:
# Define some helper functions
def get_classes_strings(classes, labels_ids):
    # returns the classes in string format
    return [classes[label_id] for label_id in labels_ids]

def get_prediction_classes_ids(predictions):
    # returns the predictions in id format
    predictions_ids = predictions.cpu().data.numpy().argmax(1)
    return predictions_ids

def get_prediction_classes_strings(classes, predictions):
    # returns the predictions in string format
    return get_classes_strings(classes, get_prediction_classes_ids(predictions))

def show(img):
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')

rvrs_tsfm_tensor = transforms.Compose([
    UnNormalize(mean, std),
])

def predictions_vs_actuals(iterator, use_cuda, model):
    images, labels = next(iterator)
    img_list = [rvrs_tsfm_tensor(img) for img in images]
    labels_string = get_classes_strings(classes, labels.numpy())
    print('Actuals: ', labels_string)
    show(make_grid(img_list, padding=100))
    # display the predictions for the images above
    if use_cuda:
        images = images.cuda()
    predictions = model(Variable(images))
    predictions_string = get_prediction_classes_strings(classes, predictions)
    print('Predictions: ', predictions_string)

def check_accuracy(data_loader, use_cuda, model):
    correct = 0
    seen = 0
    for i, data in enumerate(data_loader):
        images, labels = data
        seen += len(images)
        if use_cuda:
            images = images.cuda()
        predictions = model(Variable(images))
        # labels is a tensor, predictions is a Variable; pull the prediction data out to numpy
        # predictions.max(1)[1] returns the indices of the max predictions
        correct += (labels.numpy() == predictions.max(1)[1].cpu().data.numpy()).sum()
    print('Accuracy: {0}, Saw: {1}, Correct: {2}'.format(correct / seen, seen, correct))
In [ ]:
predictions_vs_actuals(valid_iter, use_cuda, vgg16)
In [ ]:
check_accuracy(valid_loader, use_cuda, vgg16)
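In [ ]:
# Optional: the competition metric is log loss rather than accuracy, so it can help to
# track it on the validation set as well. This is a sketch using the same old-style
# Variable API as the rest of the notebook.
def check_log_loss(data_loader, use_cuda, model):
    total_loss = 0.0
    seen = 0
    for images, labels in data_loader:
        if use_cuda:
            images, labels = images.cuda(), labels.cuda()
        outputs = model(Variable(images))
        loss = nn.functional.cross_entropy(outputs, Variable(labels))
        total_loss += loss.data[0] * images.size(0)
        seen += images.size(0)
    print('Validation log loss: {:.4f}'.format(total_loss / seen))

check_log_loss(valid_loader, use_cuda, vgg16)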
In [ ]:
sample_submission = pd.read_csv(os.path.join(path, 'sample_submission.csv'))
In [ ]:
class CatsDogsReduxTestDataset(Dataset):
    """Test-set dataset.

    Args:
        root_dir (str): directory containing the test .jpg files.
        transform (callable, optional): transform applied to each image.
    """
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.samples = [filename for filename in os.listdir(root_dir) if filename.endswith('.jpg')]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.samples[idx])
        image = Image.open(img_name)
        if self.transform:
            image = self.transform(image)
        # return the image together with its numeric id parsed from the filename
        return image, int(re.findall(r'\d+', self.samples[idx])[0])
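In [ ]:
# Optional sanity check: fetch one test sample and confirm it comes back as a
# transformed tensor together with the numeric id parsed from its filename.
sample_image, sample_id = CatsDogsReduxTestDataset(testdir, comp_tsfm)[0]
print(sample_image.size(), sample_id)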
In [ ]:
def predict_testset(data_loader, use_cuda, model, softmax=False):
    ids_list = []
    predictions_list = []
    for i, data in enumerate(data_loader):
        images, ids = data
        if use_cuda:
            images = images.cuda()
        predictions = model(Variable(images))
        ids_list.extend(ids)
        if not softmax:
            # predictions.max(1)[1] returns the indices of the max predictions
            predictions_list.extend(predictions.max(1)[1].cpu().data.numpy())
        else:
            # probability of the positive (second) class
            predictions_list.extend(nn.functional.softmax(predictions)[:, 1].cpu().data.numpy())
    print('Finished predicting')
    return list(zip(ids_list, predictions_list))

def predictions_vs_pics(iterator, use_cuda, model):
    images, _ = next(iterator)
    img_list = [rvrs_tsfm_tensor(img) for img in images]
    show(make_grid(img_list, padding=100))
    # display the predictions for the images above
    if use_cuda:
        images = images.cuda()
    predictions = model(Variable(images))
    predictions_string = get_prediction_classes_strings(classes, predictions)
    print('Predictions: ', predictions_string)
In [ ]:
test_dataset = CatsDogsReduxTestDataset(testdir, comp_tsfm)
test_loader = get_loader(test_dataset, use_cuda, batch_size=64, shuffle=False)
test_iter = iter(test_loader)
In [ ]:
# predictions_vs_pics(test_iter, use_cuda, vgg16)
In [ ]:
results = predict_testset(test_loader, use_cuda, vgg16, softmax=True)
In [ ]:
results_frame = pd.DataFrame(results, columns=['id', 'label'])
results_frame.sort_values('id', inplace=True)
results_frame['label'] = np.clip(results_frame['label'], 0.025, 0.975)
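In [ ]:
# Optional: preview the submission before writing it out. Clipping to [0.025, 0.975]
# bounds the penalty that log loss assigns to confidently wrong predictions.
results_frame.head()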
In [ ]:
results_frame.to_csv(os.path.join(path, 'submission.csv'), index=False)
In [ ]: