In [1]:
import os
import glob

import torch
import torch.nn as nn
import torch.utils.data as data_utils
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as opti
from torch.autograd import Variable

import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms

import cv2
from PIL import Image

import numpy as np
from numpy.linalg import inv
from scipy import sparse
from scipy.sparse import lil_matrix, csr_matrix
from scipy.sparse.linalg import spsolve
from scipy.sparse.linalg import inv as spinv

print(torch.__version__)


0.1.10+ac9245a

Configurations


In [ ]:
class Args(object):
    pass
args = Args()
args.epoches = 20
args.epoches_unary_threshold = 0
args.base_lr = 1e-5
args.train_dir = '/home/albertxavier/dataset/sintel/images/'
args.arch = "resnet18"
args.img_extentions = ["png", "jpg"]
args.image_w = 256
args.image_h = 256

Custom DataLoader


In [ ]:
def default_loader(path):
    return Image.open(path).convert('RGB')

def make_dataset(dir):
    image_paths = glob.glob(os.path.join(dir, 'clean', '*', '*.png'))
    paths = []
    for img_path in image_paths:
        # The Sintel albedo and shading passes mirror the 'clean' directory layout,
        # so swapping the third-from-last path component yields the ground-truth paths.
        sp = img_path.split('/'); sp[-3] = 'albedo'; sp = ['/'] + sp; albedo_path = os.path.join(*sp)
        sp = img_path.split('/'); sp[-3] = 'shading'; sp = ['/'] + sp; shading_path = os.path.join(*sp)
        paths.append((img_path, albedo_path, shading_path))
    return paths

class MyImageFolder(data_utils.Dataset):
    def __init__(self, root, transform=None, target_transform=None,
                loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(args.img_extentions)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        
    def __getitem__(self, index):
        img_path, albedo_path, shading_path = self.imgs[index]
        
        img = self.loader(img_path)
        albedo = self.loader(albedo_path)
        shading = self.loader(shading_path)
        
        if self.transform is not None:
            img = self.transform(img)
            albedo = self.transform(albedo)
            shading = self.transform(shading)

        return img, albedo, shading
    
    def __len__(self):
        return len(self.imgs)
    
dataset = MyImageFolder(args.train_dir,
                        transforms.Compose([
                            transforms.RandomCrop((args.image_h, args.image_w)),
                            transforms.ToTensor()
                        ]))

train_loader = data_utils.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1)
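
As a quick sanity check, the loader can be iterated once to confirm the crop and tensor conversion. This is a minimal sketch; the expected shape of 1 x 3 x 256 x 256 per tensor follows from the crop size configured above.


In [ ]:
# Sanity check (illustrative): pull one batch and inspect shapes.
for img, albedo, shading in train_loader:
    # Each tensor should be 1 x 3 x args.image_h x args.image_w.
    print(img.size(), albedo.size(), shading.size())
    break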

Pretrained Model


In [ ]:
class CommonModel(nn.Module):
    """Shared ResNet trunk that exposes two intermediate feature maps."""
    def __init__(self, original_model, arch):
        super(CommonModel, self).__init__()
        if arch.startswith('resnet'):
            # children()[0:7] for resnet18: conv1, bn1, relu, maxpool, layer1, layer2, layer3
            self.unary_2M = nn.Sequential(*list(original_model.children())[0:7])
            # children()[7:8]: layer4
            self.unary_1M = nn.Sequential(*list(original_model.children())[7:8])

    def forward(self, x):
        _2M = self.unary_2M(x)    # 1/16-resolution feature map
        _1M = self.unary_1M(_2M)  # 1/32-resolution feature map
        return _2M, _1M
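
To wire this up, the trunk can be built from a pretrained resnet18 and probed with a dummy input. This is a sketch, not part of the training code: the printed shapes assume a 3 x 256 x 256 input and the layer split above, and `Variable` is used to match the 0.1.x API imported at the top.


In [ ]:
# Illustrative usage: build the trunk from a pretrained ResNet-18 and
# check the two feature-map resolutions.
original_model = models.resnet18(pretrained=True)
common = CommonModel(original_model, args.arch)

x = Variable(torch.randn(1, 3, args.image_h, args.image_w))
_2M, _1M = common(x)
print(_2M.size())  # expected: 1 x 256 x 16 x 16 (1/16 resolution)
print(_1M.size())  # expected: 1 x 512 x 8 x 8   (1/32 resolution)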