In [ ]:
# Standard library
import datetime
import glob
import os
import platform
import random
from collections import OrderedDict

# Third-party
import cv2
import numpy as np
import scipy.misc  # NOTE(review): scipy.misc image helpers are removed in SciPy >= 1.2 -- confirm still needed
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F  # was `from torch import functional as F`; the conventional F is torch.nn.functional
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as data_utils
import torchvision.models as models
import torchvision.transforms as transforms
from numpy.linalg import inv as denseinv
from PIL import Image
from scipy import sparse
from scipy.sparse import lil_matrix, csr_matrix
from scipy.sparse.linalg import inv as spinv
from scipy.sparse.linalg import spsolve
from tensorboardX import SummaryWriter
from torch.autograd import Variable  # NOTE(review): Variable is a no-op wrapper since PyTorch 0.4

# Local
from myargs import Args
from myimagefolder import MyImageFolder
from mymodel import GradientNet
In [ ]:
# https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349
# --- Run configuration ---------------------------------------------------
# Scene / decomposition target / model flavour selected for this showcase run.
scene = 'market_6'
type_ = 'shading'
gradient = True
# 'gd' = gradient-domain model, 'rgb' = direct RGB prediction.
type2 = 'gd' if gradient else 'rgb'
frame = 'frame_0035'
root = '/media/lwp/xavier/graduation_results/showcase_model/{}/{}/{}/'.format(scene, type_, type2)
# Checkpoint was saved on GPU; see the forum thread above for loading on CPU
# via map_location if needed.
snapshot = torch.load(root+'snapshot-239.pth.tar')
In [ ]:
# state_dict = snapshot['state_dict']
# for k,v in enumerate(state_dict):
# print(k,v)
In [ ]:
# --- Restore the trained network on the chosen GPU -----------------------
# torch.cuda.set_device(0)
gpu_num = 1
state_dict = snapshot['state_dict']
args = snapshot['args']
# Backbone: ImageNet-pretrained model whose architecture name was stored in
# the checkpoint's args (a DenseNet variant per GradientNet's signature).
densenet = getattr(models, args.arch)(pretrained=True).cuda(gpu_num)
net = GradientNet(densenet=densenet,
                  growth_rate=32,
                  transition_scale=2,
                  pretrained_scale=4,
                  gradient=gradient).cuda(gpu_num)
net.load_state_dict(state_dict)
In [ ]:
# pretrained.train()
# NOTE(review): train() keeps dropout/batch-norm layers in training mode even
# though this script only runs a single forward pass below -- confirm that
# eval() was not intended here.
net.train()
In [ ]:
def loadimg(path, height=416, width=1024):
    """Load an RGB image and return it as a (1, 3, height, width) tensor batch.

    The image is converted to RGB, scaled to [0, 1] by ToTensor, then the top
    `height` rows and leftmost `width` columns are copied into a batch of one.
    Defaults reproduce the original hard-coded 416x1024 crop.

    Args:
        path: filesystem path of the image to load.
        height: rows kept from the top of the image (default 416).
        width: columns kept from the left of the image (default 1024).

    Returns:
        A Variable wrapping a (1, 3, height, width) float CPU tensor.
    """
    im = Image.open(path).convert('RGB')
    print(im.size)  # (W, H) -- quick sanity check of the source resolution
    im = transforms.ToTensor()(im)
    x = torch.zeros(1, 3, height, width)
    # Assumes the source image is at least height x width pixels; a smaller
    # image would raise on this slice assignment.
    x[0, :, :, :] = im[:, 0:height, 0:width]
    x = Variable(x)
    return x
# Load the selected Sintel frame and run one forward pass on the chosen GPU.
im = loadimg('/home/lwp/workspace/sintel2/clean/{}/{}.png'.format(scene, frame)).cuda(gpu_num)
# gt = cv2.imwrite('/home/cad/lwp/workspace/dataset/sintel2/albedo/alley_1/frame_0010.png')
# ft_pretrained = pretrained(im.cuda(3))
# With go_through_merge=True the net also returns the list of merged outputs
# (mergeRGB) that the visualisation cells below index into.
predict, mergeRGB = net(im.cuda(gpu_num), go_through_merge=True)
In [ ]:
# Direct-RGB model: mergeRGB[5] holds the final merged RGB prediction.
if not gradient:
    # First (only) batch element as a (C, H, W) numpy array.
    merged = mergeRGB[5][0].cpu().data.numpy()
    print(merged.shape)
    merged = merged.transpose(1, 2, 0)  # -> (H, W, C)
    print(merged.shape)
    dx = merged[:, :, 0:3]
    # cv2 expects BGR, hence the channel reversal; values assumed in [0, 1].
    cv2.imwrite('out_merge.png', dx[:, :, ::-1]*255)
In [ ]:
# Gradient-domain model: mergeRGB[5] packs dy in channels 0-2 and dx in
# channels 3-5; +0.5 shifts the zero-centred gradients for visualisation.
if gradient:
    merged = mergeRGB[5][0].cpu().data.numpy()  # (C, H, W) for batch item 0
    print(merged.shape)
    merged = merged.transpose(1, 2, 0)  # -> (H, W, C)
    print(merged.shape)
    dy = merged[:, :, 0:3]+0.5
    dx = merged[:, :, 3:6]+0.5
    # cv2 writes BGR, hence ::-1 on the channel axis.
    cv2.imwrite('out_merge_dx.png', dx[:, :, ::-1]*255)
    cv2.imwrite('out_merge_dy.png', dy[:, :, ::-1]*255)
In [ ]:
# Number of merge stages returned by the network (index 5 is used above);
# as a bare expression this only displays its value in a notebook.
len(mergeRGB)
In [ ]:
# %clear -a -f
In [ ]:
# Scratch arithmetic: rescales two counts by 1024/416 -- presumably converting
# pixel measurements between the crop resolution used above and another scale
# (TODO confirm). In a notebook only the last expression's value is shown.
3647.*1024./416.
3951.*1024./416.