In [ ]:
import re
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
#import torchvision
from torchvision.utils import make_grid
from PIL import Image
#from skimage import io #, transform
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
# import torch.utils.trainer as trainer
# import torch.utils.trainer.plugins
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import numpy as np
import pandas as pd
import os
import shutil, errno
from tqdm import tqdm_notebook

# from torchsample.modules import ModuleTrainer
# from torchsample.metrics import CategoricalAccuracy

%matplotlib notebook
%pdb

In [ ]:
def copyanything(src, dst):
    """Copy a directory tree, or fall back to copying a single file."""
    try:
        shutil.copytree(src, dst)
    except OSError as exc:  # works on Python > 2.5
        if exc.errno == errno.ENOTDIR:
            shutil.copy(src, dst)
        else:
            raise

In [ ]:
# Set some path stuff
path = "data/statefarm/"
# path = "data/statefarm/sample/"
use_cuda = torch.cuda.is_available()
batch_size = 64
print('Using CUDA:', use_cuda)

traindir = os.path.join(path, 'train')
validdir = os.path.join(path, 'valid')
testdir = os.path.join(path, 'test')

Unzip data


In [ ]:
# !rm -rf data/statefarm/driver_imgs_list.csv
# !rm -rf data/statefarm/sample
# !rm -rf data/statefarm/sample_submission.csv
# !rm -rf data/statefarm/test
# !rm -rf data/statefarm/train
# !rm -rf data/statefarm/valid
# os.listdir(path)

In [ ]:
if 'driver_imgs_list.csv' not in os.listdir(path):
    !unzip data/statefarm/driver_imgs_list.csv.zip -d data/statefarm
if 'test' not in os.listdir(path):
    !unzip data/statefarm/imgs.zip -d data/statefarm
if 'sample_submission.csv' not in os.listdir(path):
    !unzip data/statefarm/sample_submission.csv.zip -d data/statefarm

Make validation set


In [ ]:
os.listdir(path)

In [ ]:
inputpath = traindir
outputpath = validdir

for dirpath, dirnames, filenames in os.walk(inputpath):
    print('__________________________________________')
    class_name = dirpath[len(inputpath)+1:]
    structure = os.path.join(outputpath, class_name)
    if not os.path.isdir(structure):
        os.mkdir(structure)
        print('For class {0}, moving files to validation set'.format(class_name))
        ori_n = len(filenames)
        if ori_n > 10:
            # move a random ~10% of each class into the validation set
            valid_to_move = np.random.choice(filenames, int(ori_n/10), replace=False)
            for file in valid_to_move:
                os.rename(os.path.join(dirpath, file), os.path.join(structure, file))
            moved_n = len(os.listdir(structure))
            print('Originally {0} files in {1}, moved {2} to {3}'.format(ori_n, dirpath, moved_n, structure))
        else:
            print('No files to move to validation set for {0}'.format(dirpath))
    else:
        print('The folder {0} already exists. Check that it has files moved for the validation set'.format(structure))

In [ ]:
dirs = [d for d in os.listdir(validdir) if os.path.isdir(os.path.join(validdir, d))]
for dirname in dirs:
    v_set = set(os.listdir(os.path.join(validdir, dirname)))
    t_set = set(os.listdir(os.path.join(traindir, dirname)))
    if len(v_set.intersection(t_set)) > 0:
        print('Problem: {0} files appear in both train and valid for class {1}'.format(len(v_set.intersection(t_set)), dirname))

In [ ]:
# sanity check: file count in the last validation class folder created above
len(os.listdir(structure))

In [ ]:
# sanity check: file count remaining in the last train class folder visited
len(os.listdir(dirpath))

Make sample folder


In [ ]:
# !rm -rf data/statefarm/sample/

In [ ]:
samp_path = "data/statefarm/sample/"
if not os.path.isdir(samp_path):
    os.mkdir(samp_path)

In [ ]:
inputpath = path
outputpath = samp_path

for dirpath, dirnames, filenames in os.walk(inputpath):
    structure = os.path.join(outputpath, dirpath[len(inputpath):])
    if os.path.join(samp_path, 'sample') in structure:
        # skip: don't sample the sample folder itself
        pass
    elif not os.path.isdir(structure):
        os.mkdir(structure)
        if len(filenames) > 0:
            # copy a small random subset of each folder into the sample tree
            files_to_copy = np.random.choice(filenames, min(20, len(filenames)), replace=False)
            for file in files_to_copy:
                copyanything(os.path.join(dirpath, file), os.path.join(structure, file))
    else:
        print("Folder {0} already exists!".format(structure))

In [ ]:
os.listdir('data/statefarm')

In [ ]:
os.listdir('data/statefarm/sample')

In [ ]:
# check a sample
set(os.listdir('data/statefarm/sample/train/c0')).intersection(set(os.listdir('data/statefarm/train/c0')))

Show one image


In [ ]:
os.listdir(validdir)

In [ ]:
test_files = [file for file in os.listdir(testdir) if file.endswith('.jpg')]

In [ ]:
example_image_path = os.path.join(testdir, test_files[3])
example_image = Image.open(example_image_path)

In [ ]:
def show_image(image):
    plt.figure()
    plt.imshow(image)
    plt.show()

In [ ]:
show_image(example_image)

Figure out how to process the example image so the model will accept it


In [ ]:
# ImageNet channel statistics expected by the pretrained VGG weights
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
comp_tsfm = transforms.Compose([
        transforms.Lambda(lambda img: img.resize((224,224))),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)])

In [ ]:
tsfm_torch = comp_tsfm(example_image)
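
As a quick sanity check (a minimal sketch using the names above), the composed transform should yield a normalized 3x224x224 float tensor:


In [ ]:
# Expected: torch.Size([3, 224, 224]); values roughly zero-centered after Normalize
print(tsfm_torch.size())
print(tsfm_torch.mean(), tsfm_torch.std())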

In [ ]:
class UnNormalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Normalized tensor image of size (C, H, W).
        Returns:
            Tensor: De-normalized image.
        """
        # clone so we don't mutate the caller's tensor in place
        tensor = tensor.clone()
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
            # reverses the normalize step: t.sub_(m).div_(s)
        return tensor

In [ ]:
rvrs_tsfm = transforms.Compose([
    UnNormalize(mean=mean, std=std),
    transforms.ToPILImage()
])

In [ ]:
rvrs_tsfm(comp_tsfm(example_image))

Get datasets ready


In [ ]:
def get_dataset(dirname, tsfm=None):
    return datasets.ImageFolder(dirname, transform=tsfm)

def get_loader(dataset, use_cuda, batch_size=64, shuffle=True):
    # pin_memory speeds up host-to-GPU transfers when CUDA is in use
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, pin_memory=use_cuda)

In [ ]:
train_dataset = get_dataset(traindir, tsfm=comp_tsfm)
train_loader = get_loader(train_dataset, use_cuda, batch_size=128)

In [ ]:
classes = train_dataset.classes
n_classes = len(classes)

Restructure the classification head for fine-tuning on State Farm


In [ ]:
# Load in pretrained VGG
# vgg16 = models.vgg16(pretrained=True)
model = models.vgg19_bn(pretrained=True)

# Finetune by replacing the last fully connected layer and freezing all network parameters
for param in model.parameters():
    param.requires_grad = False
    
print('Using {:d} classes: {}'.format(n_classes, classes))

In [ ]:
model.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(4096, n_classes),
        )
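
To confirm the freeze worked, a quick sanity check (a minimal sketch using the model above): the only tensors requiring gradients should be the new classifier's weights and biases.


In [ ]:
# Only the freshly created classifier layers should be trainable
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print('{0} trainable tensors:'.format(len(trainable)))
print(trainable)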

In [ ]:
# Monkey patch the parameters() to return trainable weights only
import types

def parameters(self):
    p = filter(lambda p: p.requires_grad, nn.Module.parameters(self))
    return p

model.parameters = types.MethodType(parameters, model)
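
For reference, the same effect can be had without monkey patching by handing the optimizer a filtered parameter list; this variant is used later when resuming training:


In [ ]:
# Equivalent, without patching model.parameters():
# optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
#                        lr=0.001, weight_decay=.001)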

Define loss and optimizer


In [ ]:
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=.001)

Train


In [ ]:
# enable cuda if available
if use_cuda:
    model.cuda()
    criterion.cuda()

In [ ]:
model_name = 'vgg19_bn_ft'

savedir = os.path.join(path, 'save_{0}'.format(model_name))
if not os.path.isdir(savedir):
    os.mkdir(savedir)

In [ ]:
n_epochs = 5
g_epoch = 0
epoch_list = []
loss_list = []
accuracy_list = []

fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
plt.ion()
fig.show()
fig.canvas.draw()
k = 0

for epoch in tqdm_notebook(range(n_epochs)):
    # for accuracy check after each epoch
    correct = 0
    seen = 0
    running_loss = 0.0
    g_epoch += 1
    for i, data in enumerate(train_loader):
        # get inputs and labels
        inputs, labels = data
        
        # wrap in Variable (move to the GPU first if available)
        if use_cuda:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        
        # zero the gradients
        optimizer.zero_grad()
        
        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.data[0]
        seen += len(inputs)
        correct += (labels == outputs.max(1)[1]).sum().data[0]
        
    epoch_list.append(epoch)
    if not loss_list:
        last_loss = 9999999
    else:
        last_loss = loss_list[-1]
    loss_list.append(running_loss)
    accuracy_list.append(float(correct)/float(seen))
    ax1.clear()
    ax1.plot(epoch_list, loss_list)
    ax2.clear()
    ax2.plot(epoch_list, accuracy_list)
    # suptitle so the caption doesn't land on whichever subplot is current
    fig.suptitle("Epoch: {0}, loss on left, accuracy on right".format(epoch))
    fig.canvas.draw()
    if abs((last_loss-running_loss)/last_loss) < .03 and epoch > 0:
        k += 1
        if k >= 3:
            k = 0
            optimizer.param_groups[0]['lr'] *= .85
            print('dropped lr to {0} at epoch {1}'.format(optimizer.param_groups[0]['lr'], epoch+1))
    else:
        k = 0

    # checkpoint every 20 epochs (only fires on runs longer than n_epochs=5)
    if (epoch+1) % 20 == 0:
        torch.save(model.state_dict(), os.path.join(savedir, model_name+'_{0}'.format(epoch+1)))
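
The loop above implements a manual plateau rule: if the relative loss improvement stays below 3% for three consecutive epochs, the learning rate is multiplied by 0.85. Recent PyTorch releases ship an equivalent scheduler; a minimal sketch (assuming torch.optim.lr_scheduler.ReduceLROnPlateau is available in your version):


In [ ]:
# from torch.optim import lr_scheduler
# scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.85, patience=3)
# # then call scheduler.step(running_loss) once per epoch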

In [ ]:
print(seen)

In [ ]:
print(correct)

Save Weights


In [ ]:
savedir = os.path.join(path, 'save_{0}'.format(model_name))
if not os.path.isdir(savedir):
    os.mkdir(savedir)

In [ ]:
torch.save(model.state_dict(), os.path.join(savedir, 'epoch_{0}.pth'.format(g_epoch)))

Resume training


In [ ]:
saves = os.listdir(savedir)
# keep only the longest filenames so e.g. epoch_9 doesn't sort after epoch_10
max_len = len(max(saves, key=len))
recent_saves = [x for x in saves if len(x) == max_len]
recent_saves.sort()
last_save = recent_saves[-1]

model = models.vgg19_bn(pretrained=True)
model.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(4096, n_classes),
        )

model.load_state_dict(torch.load(os.path.join(savedir, last_save)))

# re-freeze the convolutional features and rebuild the optimizer: the old
# optimizer still references the previous model object's parameters
for param in model.features.parameters():
    param.requires_grad = False
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=0.001, weight_decay=.001)

In [ ]:
# enable cuda if available
if use_cuda:
    model.cuda()
    criterion.cuda()

for epoch in range(4):
    running_loss = 0.0
    g_epoch += 1
    for i, data in enumerate(train_loader):
        # get inputs and labels
        inputs, labels = data
        
        # wrap in Variable (move to the GPU first if available)
        if use_cuda:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        
        # zero the gradients
        optimizer.zero_grad()
        
        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.data[0]
        if i % 5 == 0:    # print every 5 minibatches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss))
            running_loss = 0.0

print('Finished Training')

Visually validate the classifier


In [ ]:
valid_dataset = get_dataset(validdir, tsfm=comp_tsfm)
valid_loader = get_loader(valid_dataset, use_cuda, batch_size=4)
valid_iter = iter(valid_loader)

In [ ]:
# Define some helper functions
def get_classes_strings(classes, labels_ids):
    # returns the classes in string format
    return [classes[label_id] for label_id in labels_ids]

def get_prediction_classes_ids(predictions):
    # returns the predictions in id format
    predictions_ids = predictions.cpu().data.numpy().argmax(1)
    return predictions_ids

def get_prediction_classes_strings(classes, predictions):
    # returns the predictions in string format
    return get_classes_strings(classes, get_prediction_classes_ids(predictions))

def show(img):
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
    
rvrs_tsfm_tensor = transforms.Compose([
    UnNormalize(mean, std),
])    

def predictions_vs_actuals(iterator, use_cuda, model):
    model.eval()
    images, labels = next(iterator)
    img_list = [rvrs_tsfm_tensor(img) for img in images]
    labels_string = get_classes_strings(classes, labels.numpy())
    print('Actuals: ', labels_string)
    show(make_grid(img_list, padding=100))
    # display the predictons for the images above
    if use_cuda:
        images = images.cuda()
    predictions = model(Variable(images))
    predictions_string = get_prediction_classes_strings(classes, predictions)
    print('Predictions: ', predictions_string) 
    
def check_accuracy(data_loader, use_cuda, model):
    model.eval()
    correct = 0
    seen = 0
    for i, data in enumerate(data_loader):
        images, labels = data
        seen += len(images)
        if use_cuda:
            images = images.cuda()
        predictions = model(Variable(images))
        # labels is a tensor, predictions a Variable; predictions.max(1)[1]
        # gives the indices of the max predictions
        correct += (labels.numpy() == predictions.max(1)[1].cpu().data.numpy()).sum()
    print('Accuracy: {0}, Saw: {1}, Correct: {2}'.format(correct/seen, seen, correct))

In [ ]:
predictions_vs_actuals(valid_iter, use_cuda, model)

In [ ]:
check_accuracy(valid_loader, use_cuda, model)

Make a Dataset from the test images


In [ ]:
sample_submission = pd.read_csv(os.path.join(path, 'sample_submission.csv'))

In [ ]:
class TestDataset(Dataset):
    """Test-set images in a flat directory, returned with their numeric ids.

    Args:
        root_dir (str): path to the directory of .jpg files
        transform (callable, optional): transform applied to each image
    """
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.samples = [filename for filename in os.listdir(root_dir) if filename.endswith('.jpg')]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.samples[idx])
        image = Image.open(img_name)

        if self.transform:
            image = self.transform(image)

        # return the image plus the integer id parsed from 'img_<id>.jpg'
        return image, int(re.findall(r'\d+', self.samples[idx])[0])
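
A quick usage check (a sketch, indexing an arbitrary item): fetching one sample should return a transformed tensor plus the integer id parsed from its filename.


In [ ]:
sample_test = TestDataset(testdir, comp_tsfm)
img, img_id = sample_test[0]
print(img.size(), img_id)  # expected: torch.Size([3, 224, 224]) and an int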

In [ ]:
def predict_testset(data_loader, use_cuda, model, softmax=False):
    model.eval()
    ids_list = []
    predictions_list = []
    k = 0
    for i, data in enumerate(data_loader):
        images, ids = data
        k += len(ids)
        if use_cuda:
            images = images.cuda()
        predictions = model(Variable(images))
        ids_list.extend(ids)
        if not softmax:
            # predictions.max(1)[1] returns the indices of the max predictions
            predictions_list.extend(predictions.max(1)[1].cpu().data.numpy())
        else:
            predictions_list.extend(nn.functional.softmax(predictions).cpu().data.numpy())
    print('Finished predicting for {0} images'.format(k))
    return list(zip(ids_list, predictions_list))

def predictions_vs_pics(iterator, use_cuda, model):
    model.eval()
    images, _ = next(iterator)
    img_list = [rvrs_tsfm_tensor(img) for img in images]
    show(make_grid(img_list, padding=100))
    # display the predictons for the images above
    if use_cuda:
        images = images.cuda()
    predictions = model(Variable(images))
    predictions_string = get_prediction_classes_strings(classes, predictions)
    print('Predictions: ', predictions_string)

In [ ]:
test_dataset = TestDataset(testdir, comp_tsfm)
test_loader = get_loader(test_dataset, use_cuda, batch_size, shuffle=False)
test_iter = iter(test_loader)

In [ ]:
results = predict_testset(test_loader, use_cuda, model, softmax=True)

In [ ]:
results_dict = {}
for result in results:
    # clip probabilities away from 0 and 1 to bound the Kaggle log-loss penalty
    results_dict['img_{0}.jpg'.format(result[0])] = np.clip(result[1], 0.001, 0.999)

In [ ]:
results_frame = pd.DataFrame.from_dict(results_dict, orient='index')

In [ ]:
sample_submission.head()

In [ ]:
results_frame.reset_index(inplace=True)
# align columns with the sample submission ('img', 'c0'...'c9')
results_frame.columns = sample_submission.columns

In [ ]:
results_frame.to_csv(path + 'submission_statefarm.csv', index=False, index_label=False)

In [ ]:
from IPython.display import FileLink

In [ ]:
FileLink(path + 'submission_statefarm.csv')

In [ ]:
results_frame.shape

In [ ]:
len(os.listdir(testdir))
