In [1]:
import os, glob, platform, datetime, random
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.utils.data as data_utils
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
# from torch import functional as F
import torch.nn.functional as F
# import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms

import cv2
from PIL import Image
from tensorboardX import SummaryWriter

import numpy as np
from numpy.linalg import inv as denseinv
from scipy import sparse
from scipy.sparse import lil_matrix, csr_matrix
from scipy.sparse.linalg import spsolve
from scipy.sparse.linalg import inv as spinv
import scipy.misc

from myimagefoldereccv import MyImageFolder
from mymodel import GradientNet
from myargs import Args
from myutils import MyUtils

Configurations


In [2]:
myutils = MyUtils()

args = Args()
args.arch = "densenet121"
args.epoches = 500
args.epoches_unary_threshold = 0
args.image_h = 256
args.image_w = 256
args.img_extentions = ["png"]
args.training_thresholds = [250,200,150,50,0,300]
args.base_lr = 1
args.lr = args.base_lr
args.snapshot_interval = 5000
args.debug = True


# growth_rate = (4*(2**(args.gpu_num)))
transition_scale=2
pretrained_scale=4
growth_rate = 32

#######
args.test_scene = ['alley_1', 'bamboo_1', 'bandage_1', 'cave_2', 'market_2', 'market_6', 'shaman_2', 'sleeping_1', 'temple_2']
gradient=False
args.gpu_num = 0
#######

writer_comment = 'eccv_albedo'


offset = 0.
if gradient == True: offset = 0.5

args.display_interval = 50
args.display_curindex = 0

system_ = platform.system()
system_dist, system_version, _ = platform.dist()
if system_ == "Darwin": 
    args.train_dir = '/Volumes/Transcend/dataset/sintel2'
    args.pretrained = False
    args.image_w, args.image_h = 32, 32
elif platform.dist() ==  ('debian', 'jessie/sid', ''):
    args.train_dir = '/home/albertxavier/dataset/sintel2'
    args.pretrained = True
elif platform.dist() == ('debian', 'stretch/sid', ''):
    args.train_dir = '/home/cad/lwp/workspace/dataset/sintel2'
    args.pretrained = True

if platform.system() == 'Linux': use_gpu = True
else: use_gpu = False

if use_gpu:
    torch.cuda.set_device(args.gpu_num)
    

print(platform.dist())


('debian', 'jessie/sid', '')

My DataLoader


In [3]:
train_dataset = MyImageFolder(args.train_dir, 'train',
                       transforms.Compose(
        [transforms.ToTensor()]
    ), random_crop=True, 
    img_extentions=args.img_extentions, test_scene=args.test_scene, image_h=args.image_h, image_w=args.image_w)
test_dataset = MyImageFolder(args.train_dir, 'test', 
                       transforms.Compose(
        [transforms.CenterCrop((args.image_h, args.image_w)),
         transforms.ToTensor()]
    ), random_crop=False,
    img_extentions=args.img_extentions, test_scene=args.test_scene, image_h=args.image_h, image_w=args.image_w)

train_loader = data_utils.DataLoader(train_dataset,1,True,num_workers=1)
test_loader = data_utils.DataLoader(test_dataset,1,True,num_workers=1)

Load Pretrained Model

Defination

  • DenseNet-121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16)
    • First Convolution: 32M -> 16M -> 8M
    • every transition: 8M -> 4M -> 2M (downsample 1/2, except the last block)

In [4]:
densenet = models.__dict__[args.arch](pretrained=args.pretrained)

for param in densenet.parameters():
    param.requires_grad = False

if use_gpu: densenet.cuda()

In [5]:
args.display_curindex = 0
args.base_lr = 0.01
args.display_interval = 20
args.momentum = 0.9
args.epoches = int(60*4)
#args.training_thresholds = 240//4
args.power = 0.5



net = GradientNet(densenet=densenet, growth_rate=growth_rate, 
                  transition_scale=transition_scale, pretrained_scale=pretrained_scale,
                 gradient=gradient)
if use_gpu:
    net.cuda()

mse_loss = nn.MSELoss().cuda() if use_gpu==True else nn.MSELoss()
mse_crf_loss = nn.MSELoss().cuda() if use_gpu==True else nn.MSELoss()

parameters = filter(lambda p: p.requires_grad, net.parameters())
optimizer = optim.SGD(parameters, lr=args.base_lr, momentum=args.momentum)

In [6]:
def generate_y(predict_unary, predict_dx, predict_dy, gt, predict_alpha, predict_beta, max_iter=100, eps=1.e-4, use_gpu=True, volatile=False):
    def generate_y_(last_y, predict_unary, predict_dx, predict_dy, gt, predict_alpha, predict_beta, use_gpu=True):
        def prepare_fileters(direction='up'):
            filters = torch.Tensor(torch.zeros(3,3,3,3))
            if direction == 'up': 
                for i in range(3): filters[i,i,0,1] = 1.
            elif direction == 'down': 
                for i in range(3): filters[i,i,2,1] = 1.
            elif direction == 'left': 
                for i in range(3): filters[i,i,1,0] = 1.
            else: 
                for i in range(3): filters[i,i,1,2] = 1.
            filters = Variable(filters)
            if use_gpu == True: filters = filters.cuda()
            return filters

        f_up = prepare_fileters(direction='up')
        f_down = prepare_fileters(direction='down')
        f_left = prepare_fileters(direction='left')
        f_right = prepare_fileters(direction='right')

        last_y_up = F.conv2d(last_y, f_up, padding=1)
        last_y_down = F.conv2d(last_y, f_down, padding=1)
        last_y_left = F.conv2d(last_y, f_left, padding=1)
        last_y_right = F.conv2d(last_y, f_right, padding=1)
        
        t_up = F.conv2d(predict_dy, f_up, padding=1)
        t_down = -predict_dy
        t_left = F.conv2d(predict_dx, f_left, padding=1)
        t_right = -predict_dx
        
        beta_up = predict_beta[:,0:1,:,:]
        beta_down = predict_beta[:,1:2,:,:]
        beta_left = predict_beta[:,2:3,:,:]
        beta_right = predict_beta[:,3:4,:,:]
        
        sum_beta = beta_up + beta_down + beta_left + beta_right
        constant = predict_alpha + sum_beta
        #print('constant', constant)
        
        # y = (predict_alpha * predict_unary + \
        #     beta_up * (last_y_up + t_up) + \
        #     beta_down * (last_y_down + t_down) + \
        #      beta_left * (last_y_left + t_left) + beta_right * (last_y_right + t_right))/constant

        y = predict_alpha * predict_unary
        y = y + last_y_up    + beta_up    * t_up
        y = y + last_y_down  + beta_down  * t_down
        y = y + last_y_left  + beta_left  * t_left
        y = y + last_y_right + beta_right * t_right
        y = y / 5.
        return y
    
    predict_unary = predict_unary.clone()
    predict_dx = predict_dx.clone()
    predict_dy = predict_dy.clone()
    
    #y = Variable(predict_unary.data.cpu().clone()+torch.rand(predict_unary.size())/10., volatile=True).cuda()
    y = Variable(predict_unary.data.clone())
        
    if use_gpu == True: y = y.cuda()
    iters = 0
    while 1:
        last_y = y.clone()
        y = generate_y_(y, predict_unary, predict_dx, predict_dy, gt, predict_alpha, predict_beta, use_gpu=use_gpu)
        cur_loss = myutils.mse_loss_scalar(y, last_y)
        if cur_loss <= eps: 
            #print('cur loss', cur_loss)
            #print('cur iter', iters)
            #print('y min', y.min(), 'max', y.max())
            break
        if iters >= max_iter: 
            #print('@break at max iter', cur_loss)
            break
        iters += 1
        #print('y min', y.min(), 'max', y.max())
        break
    return y

In [7]:
def crf_loss(y, predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta, volatile=False):
#     return torch.cat([y],1)
    def filter_gen(direction='x'):
        filters = torch.Tensor(torch.zeros(3,3,3,3))
        if use_gpu == True: filters = filters.cuda()
        for i in range(3):
            filters[i,i,1,1] = -1.
        if direction == 'x':
            for i in range(3):
                filters[i,i,1,2] = 1.
        else:
            for i in range(3):
                filters[i,i,2,1] = 1.
        filters = Variable(filters)
        return filters
    
    def prepare_fileters(direction='up'):
            filters = torch.Tensor(torch.zeros(3,3,3,3))
            if direction == 'up': 
                for i in range(3): filters[i,i,0,1] = 1.
            elif direction == 'down': 
                for i in range(3): filters[i,i,2,1] = 1.
            elif direction == 'left': 
                for i in range(3): filters[i,i,1,0] = 1.
            else: 
                for i in range(3): filters[i,i,1,2] = 1.
            filters = Variable(filters)
            if use_gpu == True: filters = filters.cuda()
            return filters

    predict_unary = predict_unary.clone()
    predict_dx = predict_dx.clone()
    predict_dy = predict_dy.clone()
    
    if volatile ==True:
        predict_alpha = predict_alpha.clone()
        predict_beta = predict_beta.clone()


    f_up = prepare_fileters(direction='up')
    f_down = prepare_fileters(direction='down')
    f_left = prepare_fileters(direction='left')
    f_right = prepare_fileters(direction='right')

    beta_up = predict_beta[:,0,:,:]
    beta_down = predict_beta[:,1,:,:]
    beta_left = predict_beta[:,2,:,:]
    beta_right = predict_beta[:,3,:,:]
    
    f_dx = filter_gen(direction='x')
    f_dy = filter_gen(direction='y')
    
    J1 = (y - predict_alpha * predict_unary)**2
    J2 = (((y - F.conv2d(y, f_up, padding=1) - beta_up * F.conv2d(predict_dy, f_up, padding=1))**2))
    J3 = (((y - F.conv2d(y, f_down, padding=1) + beta_down * predict_dy)**2))
    J4 = (((y - F.conv2d(y, f_left, padding=1) - beta_left * F.conv2d(predict_dx, f_left, padding=1))**2))
    J5 = (((y - F.conv2d(y, f_right, padding=1) + beta_right * predict_dx)**2))
    J = torch.cat([J1,J2,J3,J4,J5],1) 
    
    return J

In [8]:
def train_eval_model_per_epoch(epoch, net, args, train_loader, test_loader, phase='train'):
    if phase == 'train':
        volatile = False
        net.train()
    else:
        volatile = True
#         net.eval()
        net.train()
    
    print('epoch: {} [{}]'.format(epoch, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

    """adjust learning rate"""
    myutils.adjust_learning_rate(optimizer, args, epoch, beg=0, end=args.epoches)
    #if epoch < args.training_thresholds: 
    #    myutils.adjust_learning_rate(optimizer, args, epoch, beg=0, end=args.training_thresholds-1)
    #else:
    #    myutils.adjust_learning_rate(optimizer, args, epoch, beg=args.training_thresholds, end=args.epoches)
    writer.add_scalar('learning rate', optimizer.param_groups[0]['lr'], global_step=epoch)

    """init statics"""
    run_loss_unary = 0.
    run_loss_dx = 0.
    run_loss_dy = 0.
    run_loss_y = 0.
    run_loss_crf = 0.
    run_cnt   = 0.00001

    """for all training/test data"""
    loader = train_loader if phase == 'train' else test_loader
    
    for ind, data in enumerate(loader, 0):
        """prepare data"""
        input_img, gt_albedo, gt_shading, cur_scene, img_path = data
        (cur_scene,) = cur_scene
        (img_path,) = img_path
        cur_frame = img_path.split('/')[-1]
        input_img = Variable(input_img, volatile=volatile)
        gt_albedo = Variable(gt_albedo, requires_grad=False)
        gt_shading = Variable(gt_shading)
        if use_gpu: 
            input_img, gt_albedo, gt_shading = input_img.cuda(), gt_albedo.cuda(), gt_shading.cuda()
        
        """prepare gradient"""
        gt_dx = myutils.makeGradientTorch(gt_albedo, direction='x', use_gpu=use_gpu)
        gt_dy = myutils.makeGradientTorch(gt_albedo, direction='y', use_gpu=use_gpu)
        
        if phase == 'train':
            optimizer.zero_grad()
        
        predict_all = net(input_img)
        predict_unary = predict_all[:,0:3,:,:]
        predict_dx = predict_all[:,3:6,:,:]
        predict_dy = predict_all[:,6:9,:,:]
        predict_alpha = predict_all[:,9:10,:,:]
        predict_beta = predict_all[:,9:13,:,:]
        
        #print('alpha', predict_alpha.min(), predict_beta.max())
        #print('beta ', predict_beta.min(), predict_beta.max())
        
        y = None
        crf_loss_y = None
        crf_loss_gt = None
        
        """prepare crf y"""
        y = generate_y(predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta, use_gpu=use_gpu, volatile=volatile)
        #y = Variable(y.data.clone(), requires_grad=False).cuda()

        #print('y', y.min(), y.max())


        """prepare crf loss"""
        #crf_loss_y = crf_loss(y, predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta)
        # crf_loss_y = crf_loss(predict_dx, predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta)
        #crf_loss_gt = crf_loss(gt_albedo, predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta, volatile=True)
        # crf_loss_gt = crf_loss(predict_dy, predict_unary, predict_dx, predict_dy, gt_albedo, predict_alpha, predict_beta)
        #crf_loss_gt = Variable(crf_loss_gt.data.cpu(), requires_grad=False).cuda()
        
        """prepare final gt"""
        predict_final = None
        gt_final = None
        predict_final = torch.cat([predict_all[:,0:3+6,:,:], y], 1)
        gt_final = torch.cat([gt_albedo, gt_dx, gt_dy, gt_albedo], 1)
            
        
        """compute loss"""
        loss = mse_loss(predict_final, gt_final)
        # c_loss = mse_crf_loss(predict_dx, predict_dy)
        
        run_loss_unary += myutils.mse_loss_scalar(predict_unary, gt_albedo)
        run_loss_dx += myutils.mse_loss_scalar(predict_dx, gt_dx)
        run_loss_dy += myutils.mse_loss_scalar(predict_dy, gt_dy)
        run_loss_y += myutils.mse_loss_scalar(y, gt_albedo)
        #run_loss_crf += myutils.mse_loss_scalar(crf_loss_y, 0)
        run_cnt += 1

        """backward"""
        if phase == 'train':
            loss.backward()
            # c_loss.backward()
            optimizer.step()
        
        """generate display img"""
        display_im = myutils.tensor2Numpy(input_img)[:,:,::-1]*255
        display_gt_albedo = myutils.tensor2Numpy(gt_albedo)[:,:,::-1]*255
        display_gt_dx = (myutils.tensor2Numpy(gt_dx)[:,:,::-1]+0.5)*255
        display_gt_dy = (myutils.tensor2Numpy(gt_dy)[:,:,::-1]+0.5)*255
        display_unary = myutils.tensor2Numpy(predict_unary)[:,:,::-1]*255
        display_dx = (myutils.tensor2Numpy(predict_dx)[:,:,::-1]+0.5)*255
        display_dy = (myutils.tensor2Numpy(predict_dy)[:,:,::-1]+0.5)*255
        display_y = (myutils.tensor2Numpy(y)[:,:,::-1])*255

        """display"""
        if (phase == 'train' and args.display_curindex % args.display_interval == 0) or \
        (phase == 'test' and cur_scene == 'alley_1' and cur_frame == 'frame_0001.png'):
            # print('display ', phase, img_path, display_im.shape)
            cv2.imwrite('snapshot{}/input.png'.format(args.gpu_num), display_im)
            cv2.imwrite('snapshot{}/{}-gt-{}-unary.png'.format(args.gpu_num, phase, epoch), display_gt_albedo) 
            cv2.imwrite('snapshot{}/{}-gt-{}-dx.png'.format(args.gpu_num, phase, epoch), display_gt_dx) 
            cv2.imwrite('snapshot{}/{}-gt-{}-dy.png'.format(args.gpu_num, phase, epoch), display_gt_dy) 
            cv2.imwrite('snapshot{}/{}-rs-{}-unary.png'.format(args.gpu_num, phase, epoch), display_unary)
            cv2.imwrite('snapshot{}/{}-rs-{}-dx.png'.format(args.gpu_num, phase, epoch), display_dx)
            cv2.imwrite('snapshot{}/{}-rs-{}-dy.png'.format(args.gpu_num, phase, epoch), display_dy)
            cv2.imwrite('snapshot{}/{}-rs-{}-y.png'.format(args.gpu_num, phase, epoch), display_y)
        
        args.display_curindex += 1
    
    """output loss"""
    loss_output = ''
    loss_output += '{} loss: '.format(phase)
    loss_output += 'unary: %6f ' % (run_loss_unary/run_cnt)
    loss_output += 'pairwise: %6f ' % ((run_loss_dx+run_loss_dy)/run_cnt)
    #loss_output += 'crf: %6f ' % (run_loss_crf/run_cnt)
    loss_output += 'y: %6f ' % (run_loss_y/run_cnt)
    
    print(loss_output)
    
    """write to tensorboard"""
    writer.add_scalars('loss', {
        '%s unary loss'% (phase): np.array([run_loss_unary/run_cnt]),
        '%s dx loss'% (phase): np.array([run_loss_dx/run_cnt]),
        '%s dy loss'% (phase): np.array([run_loss_dy/run_cnt]),
        '%s pairwise loss'% (phase): np.array([(run_loss_dx+run_loss_dy)/run_cnt])
        #'%s y loss'% (phase): np.array([run_loss_y/run_cnt]),
    }, global_step=epoch)
    
    """save snapshot"""
    if phase == 'train':
        myutils.save_snapshot(epoch, args, net, optimizer)

In [9]:
"""training loop"""
writer = SummaryWriter(comment='-{}'.format(writer_comment))

for epoch in range(args.epoches):
    phase = 'test' if (epoch+1) % 5 == 0 else 'train'
    train_eval_model_per_epoch(epoch, net, args, train_loader, test_loader, phase=phase)


epoch: 0 [2018-02-19 00:55:26]
train loss: unary: 0.064454 pairwise: 0.011692 y: 0.064844 
epoch: 1 [2018-02-19 00:58:49]
train loss: unary: 0.051616 pairwise: 0.007777 y: 0.051788 
epoch: 2 [2018-02-19 01:02:00]
train loss: unary: 0.040032 pairwise: 0.006333 y: 0.040420 
epoch: 3 [2018-02-19 01:05:11]
train loss: unary: 0.035519 pairwise: 0.005698 y: 0.036175 
epoch: 4 [2018-02-19 01:08:21]
test loss: unary: 0.037808 pairwise: 0.005209 y: 0.042884 
epoch: 5 [2018-02-19 01:09:22]
train loss: unary: 0.029033 pairwise: 0.005583 y: 0.029579 
epoch: 6 [2018-02-19 01:12:32]
train loss: unary: 0.024924 pairwise: 0.005270 y: 0.025641 
epoch: 7 [2018-02-19 01:15:43]
train loss: unary: 0.022993 pairwise: 0.004912 y: 0.023620 
epoch: 8 [2018-02-19 01:18:53]
train loss: unary: 0.022130 pairwise: 0.004810 y: 0.022798 
epoch: 9 [2018-02-19 01:22:04]
test loss: unary: 0.034786 pairwise: 0.004502 y: 0.037226 
epoch: 10 [2018-02-19 01:23:04]
train loss: unary: 0.019437 pairwise: 0.004842 y: 0.019980 
epoch: 11 [2018-02-19 01:26:16]
train loss: unary: 0.018624 pairwise: 0.004606 y: 0.019164 
epoch: 12 [2018-02-19 01:29:27]
train loss: unary: 0.017020 pairwise: 0.004606 y: 0.017508 
epoch: 13 [2018-02-19 01:32:43]
train loss: unary: 0.015812 pairwise: 0.004319 y: 0.016262 
epoch: 14 [2018-02-19 01:35:56]
test loss: unary: 0.038959 pairwise: 0.004338 y: 0.038295 
epoch: 15 [2018-02-19 01:36:58]
train loss: unary: 0.015894 pairwise: 0.004522 y: 0.016352 
epoch: 16 [2018-02-19 01:40:12]
train loss: unary: 0.015843 pairwise: 0.004507 y: 0.016267 
epoch: 17 [2018-02-19 01:43:22]
train loss: unary: 0.014506 pairwise: 0.004282 y: 0.014909 
epoch: 18 [2018-02-19 01:46:33]
train loss: unary: 0.014626 pairwise: 0.004421 y: 0.014977 
epoch: 19 [2018-02-19 01:49:44]
test loss: unary: 0.032142 pairwise: 0.004235 y: 0.032984 
epoch: 20 [2018-02-19 01:50:45]
train loss: unary: 0.013665 pairwise: 0.004298 y: 0.013982 
epoch: 21 [2018-02-19 01:53:54]
train loss: unary: 0.013887 pairwise: 0.004153 y: 0.014206 
epoch: 22 [2018-02-19 01:57:05]
train loss: unary: 0.013268 pairwise: 0.004413 y: 0.013602 
epoch: 23 [2018-02-19 02:00:15]
train loss: unary: 0.012401 pairwise: 0.004017 y: 0.012682 
epoch: 24 [2018-02-19 02:03:25]
test loss: unary: 0.032619 pairwise: 0.004185 y: 0.032430 
epoch: 25 [2018-02-19 02:04:26]
train loss: unary: 0.012296 pairwise: 0.004235 y: 0.012580 
epoch: 26 [2018-02-19 02:07:36]
train loss: unary: 0.011755 pairwise: 0.004283 y: 0.011994 
epoch: 27 [2018-02-19 02:10:46]
train loss: unary: 0.011246 pairwise: 0.003955 y: 0.011520 
epoch: 28 [2018-02-19 02:13:57]
train loss: unary: 0.011001 pairwise: 0.003887 y: 0.011236 
epoch: 29 [2018-02-19 02:17:07]
test loss: unary: 0.032908 pairwise: 0.004155 y: 0.032573 
epoch: 30 [2018-02-19 02:18:07]
train loss: unary: 0.010710 pairwise: 0.003944 y: 0.010942 
epoch: 31 [2018-02-19 02:21:17]
train loss: unary: 0.010725 pairwise: 0.004150 y: 0.010945 
epoch: 32 [2018-02-19 02:24:27]
train loss: unary: 0.009918 pairwise: 0.004060 y: 0.010128 
epoch: 33 [2018-02-19 02:27:37]
train loss: unary: 0.010042 pairwise: 0.004191 y: 0.010246 
epoch: 34 [2018-02-19 02:30:48]
test loss: unary: 0.031742 pairwise: 0.004139 y: 0.031920 
epoch: 35 [2018-02-19 02:31:50]
train loss: unary: 0.010007 pairwise: 0.003980 y: 0.010221 
epoch: 36 [2018-02-19 02:35:00]
train loss: unary: 0.009593 pairwise: 0.004011 y: 0.009775 
epoch: 37 [2018-02-19 02:38:10]
train loss: unary: 0.010444 pairwise: 0.003972 y: 0.010620 
epoch: 38 [2018-02-19 02:41:20]
train loss: unary: 0.009438 pairwise: 0.003893 y: 0.009644 
epoch: 39 [2018-02-19 02:44:31]
test loss: unary: 0.032075 pairwise: 0.004120 y: 0.031675 
epoch: 40 [2018-02-19 02:45:32]
train loss: unary: 0.009646 pairwise: 0.003968 y: 0.009843 
epoch: 41 [2018-02-19 02:48:42]
train loss: unary: 0.009105 pairwise: 0.003921 y: 0.009266 
epoch: 42 [2018-02-19 02:51:53]
train loss: unary: 0.008974 pairwise: 0.003974 y: 0.009151 
epoch: 43 [2018-02-19 02:55:03]
train loss: unary: 0.008620 pairwise: 0.003845 y: 0.008753 
epoch: 44 [2018-02-19 02:58:14]
test loss: unary: 0.032987 pairwise: 0.004094 y: 0.032545 
epoch: 45 [2018-02-19 02:59:15]
train loss: unary: 0.008482 pairwise: 0.003794 y: 0.008651 
epoch: 46 [2018-02-19 03:02:25]
train loss: unary: 0.008537 pairwise: 0.003914 y: 0.008682 
epoch: 47 [2018-02-19 03:05:36]
train loss: unary: 0.008432 pairwise: 0.003852 y: 0.008597 
epoch: 48 [2018-02-19 03:08:47]
train loss: unary: 0.008045 pairwise: 0.003812 y: 0.008183 
epoch: 49 [2018-02-19 03:11:58]
test loss: unary: 0.030748 pairwise: 0.004072 y: 0.031192 
epoch: 50 [2018-02-19 03:13:00]
train loss: unary: 0.008031 pairwise: 0.003857 y: 0.008152 
epoch: 51 [2018-02-19 03:16:10]
train loss: unary: 0.007907 pairwise: 0.003685 y: 0.008035 
epoch: 52 [2018-02-19 03:19:21]
train loss: unary: 0.008208 pairwise: 0.003792 y: 0.008339 
epoch: 53 [2018-02-19 03:22:31]
train loss: unary: 0.007747 pairwise: 0.003730 y: 0.007898 
epoch: 54 [2018-02-19 03:25:41]
test loss: unary: 0.031580 pairwise: 0.004088 y: 0.032315 
epoch: 55 [2018-02-19 03:26:42]
train loss: unary: 0.007623 pairwise: 0.003626 y: 0.007751 
epoch: 56 [2018-02-19 03:29:53]
train loss: unary: 0.007226 pairwise: 0.003672 y: 0.007362 
epoch: 57 [2018-02-19 03:33:03]
train loss: unary: 0.007292 pairwise: 0.003652 y: 0.007400 
epoch: 58 [2018-02-19 03:36:14]
train loss: unary: 0.007242 pairwise: 0.003651 y: 0.007340 
epoch: 59 [2018-02-19 03:39:25]
test loss: unary: 0.031157 pairwise: 0.004058 y: 0.031746 
epoch: 60 [2018-02-19 03:40:25]
train loss: unary: 0.007210 pairwise: 0.003564 y: 0.007328 
epoch: 61 [2018-02-19 03:43:35]
train loss: unary: 0.007046 pairwise: 0.003593 y: 0.007149 
epoch: 62 [2018-02-19 03:46:46]
train loss: unary: 0.007343 pairwise: 0.003784 y: 0.007462 
epoch: 63 [2018-02-19 03:49:57]
train loss: unary: 0.006828 pairwise: 0.003608 y: 0.006939 
epoch: 64 [2018-02-19 03:53:07]
test loss: unary: 0.030526 pairwise: 0.004042 y: 0.031024 
epoch: 65 [2018-02-19 03:54:09]
train loss: unary: 0.006591 pairwise: 0.003653 y: 0.006690 
epoch: 66 [2018-02-19 03:57:19]
train loss: unary: 0.006651 pairwise: 0.003572 y: 0.006750 
epoch: 67 [2018-02-19 04:00:29]
train loss: unary: 0.006696 pairwise: 0.003529 y: 0.006797 
epoch: 68 [2018-02-19 04:03:39]
train loss: unary: 0.006886 pairwise: 0.003535 y: 0.006970 
epoch: 69 [2018-02-19 04:06:50]
test loss: unary: 0.029552 pairwise: 0.004028 y: 0.029892 
epoch: 70 [2018-02-19 04:07:51]
train loss: unary: 0.006687 pairwise: 0.003618 y: 0.006783 
epoch: 71 [2018-02-19 04:11:01]
train loss: unary: 0.006374 pairwise: 0.003638 y: 0.006460 
epoch: 72 [2018-02-19 04:14:11]
train loss: unary: 0.006503 pairwise: 0.003596 y: 0.006590 
epoch: 73 [2018-02-19 04:17:21]
train loss: unary: 0.006337 pairwise: 0.003581 y: 0.006422 
epoch: 74 [2018-02-19 04:20:32]
test loss: unary: 0.028311 pairwise: 0.004017 y: 0.028397 
epoch: 75 [2018-02-19 04:21:32]
train loss: unary: 0.006365 pairwise: 0.003570 y: 0.006442 
epoch: 76 [2018-02-19 04:24:44]
train loss: unary: 0.006228 pairwise: 0.003481 y: 0.006310 
epoch: 77 [2018-02-19 04:27:54]
train loss: unary: 0.006069 pairwise: 0.003417 y: 0.006152 
epoch: 78 [2018-02-19 04:31:04]
train loss: unary: 0.006078 pairwise: 0.003419 y: 0.006156 
epoch: 79 [2018-02-19 04:34:14]
test loss: unary: 0.029924 pairwise: 0.004013 y: 0.030213 
epoch: 80 [2018-02-19 04:35:15]
train loss: unary: 0.006004 pairwise: 0.003365 y: 0.006079 
epoch: 81 [2018-02-19 04:38:26]
train loss: unary: 0.006375 pairwise: 0.003509 y: 0.006457 
epoch: 82 [2018-02-19 04:41:36]
train loss: unary: 0.006195 pairwise: 0.003375 y: 0.006279 
epoch: 83 [2018-02-19 04:44:47]
train loss: unary: 0.006029 pairwise: 0.003493 y: 0.006104 
epoch: 84 [2018-02-19 04:47:57]
test loss: unary: 0.029689 pairwise: 0.003990 y: 0.030034 
epoch: 85 [2018-02-19 04:49:00]
train loss: unary: 0.005857 pairwise: 0.003490 y: 0.005933 
epoch: 86 [2018-02-19 04:52:10]
train loss: unary: 0.005934 pairwise: 0.003491 y: 0.006006 
epoch: 87 [2018-02-19 04:55:20]
train loss: unary: 0.005899 pairwise: 0.003468 y: 0.005961 
epoch: 88 [2018-02-19 04:58:31]
train loss: unary: 0.005982 pairwise: 0.003451 y: 0.006063 
epoch: 89 [2018-02-19 05:01:43]
test loss: unary: 0.030090 pairwise: 0.003995 y: 0.030248 
epoch: 90 [2018-02-19 05:02:45]
train loss: unary: 0.006016 pairwise: 0.003481 y: 0.006087 
epoch: 91 [2018-02-19 05:05:56]
train loss: unary: 0.005805 pairwise: 0.003390 y: 0.005878 
epoch: 92 [2018-02-19 05:09:06]
train loss: unary: 0.005785 pairwise: 0.003550 y: 0.005856 
epoch: 93 [2018-02-19 05:12:16]
train loss: unary: 0.005539 pairwise: 0.003371 y: 0.005606 
epoch: 94 [2018-02-19 05:15:27]
test loss: unary: 0.032007 pairwise: 0.004011 y: 0.032381 
epoch: 95 [2018-02-19 05:16:27]
train loss: unary: 0.005583 pairwise: 0.003254 y: 0.005653 
epoch: 96 [2018-02-19 05:19:38]
train loss: unary: 0.005578 pairwise: 0.003397 y: 0.005648 
epoch: 97 [2018-02-19 05:22:48]
train loss: unary: 0.005732 pairwise: 0.003474 y: 0.005798 
epoch: 98 [2018-02-19 05:25:59]
train loss: unary: 0.005426 pairwise: 0.003378 y: 0.005483 
epoch: 99 [2018-02-19 05:29:10]
test loss: unary: 0.030520 pairwise: 0.003968 y: 0.030823 
epoch: 100 [2018-02-19 05:30:11]
train loss: unary: 0.005473 pairwise: 0.003371 y: 0.005538 
epoch: 101 [2018-02-19 05:33:21]
train loss: unary: 0.005365 pairwise: 0.003518 y: 0.005429 
epoch: 102 [2018-02-19 05:36:31]
train loss: unary: 0.005258 pairwise: 0.003345 y: 0.005316 
epoch: 103 [2018-02-19 05:39:41]
train loss: unary: 0.005527 pairwise: 0.003341 y: 0.005591 
epoch: 104 [2018-02-19 05:42:51]
test loss: unary: 0.030625 pairwise: 0.003974 y: 0.030770 
epoch: 105 [2018-02-19 05:43:52]
train loss: unary: 0.005425 pairwise: 0.003449 y: 0.005483 
epoch: 106 [2018-02-19 05:47:02]
train loss: unary: 0.005320 pairwise: 0.003206 y: 0.005380 
epoch: 107 [2018-02-19 05:50:12]
train loss: unary: 0.005383 pairwise: 0.003332 y: 0.005445 
epoch: 108 [2018-02-19 05:53:23]
train loss: unary: 0.005262 pairwise: 0.003213 y: 0.005317 
epoch: 109 [2018-02-19 05:56:33]
test loss: unary: 0.029994 pairwise: 0.003954 y: 0.030208 
epoch: 110 [2018-02-19 05:57:35]
train loss: unary: 0.005045 pairwise: 0.003272 y: 0.005102 
epoch: 111 [2018-02-19 06:00:45]
train loss: unary: 0.005170 pairwise: 0.003221 y: 0.005231 
epoch: 112 [2018-02-19 06:03:56]
train loss: unary: 0.005155 pairwise: 0.003232 y: 0.005214 
epoch: 113 [2018-02-19 06:07:06]
train loss: unary: 0.005059 pairwise: 0.003350 y: 0.005115 
epoch: 114 [2018-02-19 06:10:16]
test loss: unary: 0.029811 pairwise: 0.003946 y: 0.029949 
epoch: 115 [2018-02-19 06:11:17]
train loss: unary: 0.005236 pairwise: 0.003247 y: 0.005290 
epoch: 116 [2018-02-19 06:14:28]
train loss: unary: 0.005135 pairwise: 0.003267 y: 0.005190 
epoch: 117 [2018-02-19 06:17:39]
train loss: unary: 0.004883 pairwise: 0.003083 y: 0.004925 
epoch: 118 [2018-02-19 06:20:50]
train loss: unary: 0.005009 pairwise: 0.003216 y: 0.005058 
epoch: 119 [2018-02-19 06:24:00]
test loss: unary: 0.029132 pairwise: 0.003955 y: 0.029207 
epoch: 120 [2018-02-19 06:25:01]
train loss: unary: 0.004882 pairwise: 0.003136 y: 0.004944 
epoch: 121 [2018-02-19 06:28:11]
train loss: unary: 0.004922 pairwise: 0.003187 y: 0.004973 
epoch: 122 [2018-02-19 06:31:21]
train loss: unary: 0.004940 pairwise: 0.003157 y: 0.004992 
epoch: 123 [2018-02-19 06:34:31]
train loss: unary: 0.004944 pairwise: 0.003313 y: 0.004996 
epoch: 124 [2018-02-19 06:37:42]
test loss: unary: 0.029464 pairwise: 0.003934 y: 0.029605 
epoch: 125 [2018-02-19 06:38:44]
train loss: unary: 0.004909 pairwise: 0.003152 y: 0.004959 
epoch: 126 [2018-02-19 06:41:54]
train loss: unary: 0.004987 pairwise: 0.003301 y: 0.005045 
epoch: 127 [2018-02-19 06:45:05]
train loss: unary: 0.004818 pairwise: 0.003186 y: 0.004869 
epoch: 128 [2018-02-19 06:48:15]
train loss: unary: 0.004857 pairwise: 0.003189 y: 0.004906 
epoch: 129 [2018-02-19 06:51:25]
test loss: unary: 0.029915 pairwise: 0.003932 y: 0.029952 
epoch: 130 [2018-02-19 06:52:26]
train loss: unary: 0.004872 pairwise: 0.003091 y: 0.004919 
epoch: 131 [2018-02-19 06:55:36]
train loss: unary: 0.004811 pairwise: 0.003129 y: 0.004863 
epoch: 132 [2018-02-19 06:58:46]
train loss: unary: 0.004879 pairwise: 0.003129 y: 0.004926 
epoch: 133 [2018-02-19 07:01:56]
train loss: unary: 0.004579 pairwise: 0.003012 y: 0.004627 
epoch: 134 [2018-02-19 07:05:07]
test loss: unary: 0.029291 pairwise: 0.003927 y: 0.029311 
epoch: 135 [2018-02-19 07:06:07]
train loss: unary: 0.004740 pairwise: 0.003046 y: 0.004794 
epoch: 136 [2018-02-19 07:09:18]
train loss: unary: 0.004681 pairwise: 0.003140 y: 0.004726 
epoch: 137 [2018-02-19 07:12:28]
train loss: unary: 0.004672 pairwise: 0.002971 y: 0.004722 
epoch: 138 [2018-02-19 07:15:39]
train loss: unary: 0.004696 pairwise: 0.003135 y: 0.004744 
epoch: 139 [2018-02-19 07:18:49]
test loss: unary: 0.030010 pairwise: 0.003923 y: 0.030243 
epoch: 140 [2018-02-19 07:19:50]
train loss: unary: 0.004665 pairwise: 0.003121 y: 0.004716 
epoch: 141 [2018-02-19 07:23:00]
train loss: unary: 0.004638 pairwise: 0.003070 y: 0.004683 
epoch: 142 [2018-02-19 07:26:11]
train loss: unary: 0.004664 pairwise: 0.003108 y: 0.004708 
epoch: 143 [2018-02-19 07:29:20]
train loss: unary: 0.004605 pairwise: 0.003098 y: 0.004640 
epoch: 144 [2018-02-19 07:32:31]
test loss: unary: 0.029872 pairwise: 0.003915 y: 0.029995 
epoch: 145 [2018-02-19 07:33:32]
train loss: unary: 0.004638 pairwise: 0.003144 y: 0.004691 
epoch: 146 [2018-02-19 07:36:42]
train loss: unary: 0.004516 pairwise: 0.003064 y: 0.004561 
epoch: 147 [2018-02-19 07:39:52]
train loss: unary: 0.004635 pairwise: 0.003117 y: 0.004678 
epoch: 148 [2018-02-19 07:43:03]
train loss: unary: 0.004536 pairwise: 0.003164 y: 0.004581 
epoch: 149 [2018-02-19 07:46:13]
test loss: unary: 0.028460 pairwise: 0.003897 y: 0.028532 
epoch: 150 [2018-02-19 07:47:14]
train loss: unary: 0.004542 pairwise: 0.003021 y: 0.004581 
epoch: 151 [2018-02-19 07:50:24]
train loss: unary: 0.004409 pairwise: 0.002950 y: 0.004454 
epoch: 152 [2018-02-19 07:53:34]
train loss: unary: 0.004459 pairwise: 0.003126 y: 0.004501 
epoch: 153 [2018-02-19 07:56:46]
train loss: unary: 0.004346 pairwise: 0.002940 y: 0.004389 
epoch: 154 [2018-02-19 07:59:57]
test loss: unary: 0.029598 pairwise: 0.003903 y: 0.029602 
epoch: 155 [2018-02-19 08:00:57]
train loss: unary: 0.004485 pairwise: 0.003080 y: 0.004530 
epoch: 156 [2018-02-19 08:04:08]
train loss: unary: 0.004407 pairwise: 0.003012 y: 0.004446 
epoch: 157 [2018-02-19 08:07:19]
train loss: unary: 0.004574 pairwise: 0.003061 y: 0.004621 
epoch: 158 [2018-02-19 08:10:29]
train loss: unary: 0.004436 pairwise: 0.002984 y: 0.004472 
epoch: 159 [2018-02-19 08:13:39]
test loss: unary: 0.029477 pairwise: 0.003889 y: 0.029550 
epoch: 160 [2018-02-19 08:14:40]
train loss: unary: 0.004436 pairwise: 0.002929 y: 0.004477 
epoch: 161 [2018-02-19 08:17:50]
train loss: unary: 0.004563 pairwise: 0.003088 y: 0.004609 
epoch: 162 [2018-02-19 08:21:00]
train loss: unary: 0.004275 pairwise: 0.002978 y: 0.004317 
epoch: 163 [2018-02-19 08:24:12]
train loss: unary: 0.004293 pairwise: 0.002986 y: 0.004334 
epoch: 164 [2018-02-19 08:27:22]
test loss: unary: 0.029329 pairwise: 0.003886 y: 0.029427 
epoch: 165 [2018-02-19 08:28:22]
train loss: unary: 0.004317 pairwise: 0.003016 y: 0.004361 
epoch: 166 [2018-02-19 08:31:33]
train loss: unary: 0.004309 pairwise: 0.002958 y: 0.004348 
epoch: 167 [2018-02-19 08:34:43]
train loss: unary: 0.004276 pairwise: 0.003019 y: 0.004317 
epoch: 168 [2018-02-19 08:37:55]
train loss: unary: 0.004368 pairwise: 0.002992 y: 0.004407 
epoch: 169 [2018-02-19 08:41:06]
test loss: unary: 0.028758 pairwise: 0.003879 y: 0.028814 
epoch: 170 [2018-02-19 08:42:07]
train loss: unary: 0.004392 pairwise: 0.003012 y: 0.004432 
epoch: 171 [2018-02-19 08:45:17]
train loss: unary: 0.004259 pairwise: 0.002953 y: 0.004296 
epoch: 172 [2018-02-19 08:48:27]
train loss: unary: 0.004264 pairwise: 0.002934 y: 0.004309 
epoch: 173 [2018-02-19 08:51:41]
train loss: unary: 0.004307 pairwise: 0.002996 y: 0.004343 
epoch: 174 [2018-02-19 08:54:52]
test loss: unary: 0.029309 pairwise: 0.003872 y: 0.029391 
epoch: 175 [2018-02-19 08:55:53]
train loss: unary: 0.004200 pairwise: 0.002920 y: 0.004232 
epoch: 176 [2018-02-19 08:59:08]
train loss: unary: 0.004202 pairwise: 0.002977 y: 0.004240 
epoch: 177 [2018-02-19 09:02:22]
train loss: unary: 0.004212 pairwise: 0.002971 y: 0.004251 
epoch: 178 [2018-02-19 09:05:36]
train loss: unary: 0.004292 pairwise: 0.002879 y: 0.004328 
epoch: 179 [2018-02-19 09:08:50]
test loss: unary: 0.028424 pairwise: 0.003869 y: 0.028637 
epoch: 180 [2018-02-19 09:09:53]
train loss: unary: 0.004193 pairwise: 0.002962 y: 0.004233 
epoch: 181 [2018-02-19 09:13:06]
train loss: unary: 0.004229 pairwise: 0.002988 y: 0.004269 
epoch: 182 [2018-02-19 09:16:21]
train loss: unary: 0.004241 pairwise: 0.002984 y: 0.004274 
epoch: 183 [2018-02-19 09:19:32]
train loss: unary: 0.004110 pairwise: 0.002916 y: 0.004145 
epoch: 184 [2018-02-19 09:22:43]
test loss: unary: 0.027925 pairwise: 0.003858 y: 0.027968 
epoch: 185 [2018-02-19 09:23:43]
train loss: unary: 0.004265 pairwise: 0.003111 y: 0.004305 
epoch: 186 [2018-02-19 09:26:54]
train loss: unary: 0.004259 pairwise: 0.003013 y: 0.004298 
epoch: 187 [2018-02-19 09:30:04]
train loss: unary: 0.004117 pairwise: 0.002973 y: 0.004153 
epoch: 188 [2018-02-19 09:33:15]
train loss: unary: 0.004191 pairwise: 0.002958 y: 0.004230 
epoch: 189 [2018-02-19 09:36:26]
test loss: unary: 0.028722 pairwise: 0.003867 y: 0.028838 
epoch: 190 [2018-02-19 09:37:26]
train loss: unary: 0.004073 pairwise: 0.002982 y: 0.004109 
epoch: 191 [2018-02-19 09:40:37]
train loss: unary: 0.004311 pairwise: 0.003042 y: 0.004348 
epoch: 192 [2018-02-19 09:43:47]
train loss: unary: 0.004208 pairwise: 0.002999 y: 0.004246 
epoch: 193 [2018-02-19 09:46:57]
train loss: unary: 0.004051 pairwise: 0.002808 y: 0.004084 
epoch: 194 [2018-02-19 09:50:08]
test loss: unary: 0.028287 pairwise: 0.003847 y: 0.028350 
epoch: 195 [2018-02-19 09:51:08]
train loss: unary: 0.004143 pairwise: 0.002913 y: 0.004172 
epoch: 196 [2018-02-19 09:54:18]
train loss: unary: 0.004075 pairwise: 0.002929 y: 0.004114 
epoch: 197 [2018-02-19 09:57:29]
train loss: unary: 0.003966 pairwise: 0.002930 y: 0.004006 
epoch: 198 [2018-02-19 10:00:39]
train loss: unary: 0.004009 pairwise: 0.002828 y: 0.004046 
epoch: 199 [2018-02-19 10:03:49]
test loss: unary: 0.029193 pairwise: 0.003850 y: 0.029210 
epoch: 200 [2018-02-19 10:04:49]
train loss: unary: 0.004121 pairwise: 0.002971 y: 0.004152 
epoch: 201 [2018-02-19 10:08:00]
train loss: unary: 0.003992 pairwise: 0.002890 y: 0.004029 
epoch: 202 [2018-02-19 10:11:10]
train loss: unary: 0.004132 pairwise: 0.002934 y: 0.004164 
epoch: 203 [2018-02-19 10:14:20]
train loss: unary: 0.003929 pairwise: 0.002893 y: 0.003961 
epoch: 204 [2018-02-19 10:17:30]
test loss: unary: 0.028823 pairwise: 0.003853 y: 0.028855 
epoch: 205 [2018-02-19 10:18:31]
train loss: unary: 0.004088 pairwise: 0.002963 y: 0.004121 
epoch: 206 [2018-02-19 10:21:41]
train loss: unary: 0.004014 pairwise: 0.002954 y: 0.004049 
epoch: 207 [2018-02-19 10:24:51]
train loss: unary: 0.003990 pairwise: 0.002900 y: 0.004024 
epoch: 208 [2018-02-19 10:28:01]
train loss: unary: 0.004041 pairwise: 0.002965 y: 0.004081 
epoch: 209 [2018-02-19 10:31:10]
test loss: unary: 0.029025 pairwise: 0.003850 y: 0.029077 
epoch: 210 [2018-02-19 10:32:11]
train loss: unary: 0.003929 pairwise: 0.002861 y: 0.003963 
epoch: 211 [2018-02-19 10:35:20]
train loss: unary: 0.004040 pairwise: 0.002957 y: 0.004073 
epoch: 212 [2018-02-19 10:38:30]
train loss: unary: 0.003936 pairwise: 0.002889 y: 0.003969 
epoch: 213 [2018-02-19 10:41:40]
train loss: unary: 0.003875 pairwise: 0.002833 y: 0.003908 
epoch: 214 [2018-02-19 10:44:50]
test loss: unary: 0.028741 pairwise: 0.003846 y: 0.028818 
epoch: 215 [2018-02-19 10:45:51]
train loss: unary: 0.003878 pairwise: 0.002845 y: 0.003912 
epoch: 216 [2018-02-19 10:49:01]
train loss: unary: 0.003969 pairwise: 0.002905 y: 0.004003 
epoch: 217 [2018-02-19 10:52:12]
train loss: unary: 0.003891 pairwise: 0.002777 y: 0.003921 
epoch: 218 [2018-02-19 10:55:22]
train loss: unary: 0.003868 pairwise: 0.002770 y: 0.003899 
epoch: 219 [2018-02-19 10:58:32]
test loss: unary: 0.028358 pairwise: 0.003843 y: 0.028416 
epoch: 220 [2018-02-19 10:59:33]
train loss: unary: 0.003990 pairwise: 0.002828 y: 0.004025 
epoch: 221 [2018-02-19 11:02:43]
train loss: unary: 0.003997 pairwise: 0.002940 y: 0.004031 
epoch: 222 [2018-02-19 11:05:52]
train loss: unary: 0.003896 pairwise: 0.002757 y: 0.003929 
epoch: 223 [2018-02-19 11:09:02]
train loss: unary: 0.003856 pairwise: 0.002854 y: 0.003890 
epoch: 224 [2018-02-19 11:12:12]
test loss: unary: 0.028414 pairwise: 0.003835 y: 0.028479 
epoch: 225 [2018-02-19 11:13:13]
train loss: unary: 0.003900 pairwise: 0.002858 y: 0.003930 
epoch: 226 [2018-02-19 11:16:23]
train loss: unary: 0.003871 pairwise: 0.002863 y: 0.003906 
epoch: 227 [2018-02-19 11:19:34]
train loss: unary: 0.003758 pairwise: 0.002840 y: 0.003792 
epoch: 228 [2018-02-19 11:22:44]
train loss: unary: 0.003822 pairwise: 0.002863 y: 0.003855 
epoch: 229 [2018-02-19 11:25:54]
test loss: unary: 0.028485 pairwise: 0.003843 y: 0.028507 
epoch: 230 [2018-02-19 11:26:54]
train loss: unary: 0.003866 pairwise: 0.002950 y: 0.003899 
epoch: 231 [2018-02-19 11:30:04]
train loss: unary: 0.003918 pairwise: 0.002890 y: 0.003954 
epoch: 232 [2018-02-19 11:33:16]
train loss: unary: 0.003978 pairwise: 0.002950 y: 0.004008 
epoch: 233 [2018-02-19 11:36:26]
train loss: unary: 0.003758 pairwise: 0.002782 y: 0.003787 
epoch: 234 [2018-02-19 11:39:37]
test loss: unary: 0.028857 pairwise: 0.003837 y: 0.028919 
epoch: 235 [2018-02-19 11:40:37]
train loss: unary: 0.003896 pairwise: 0.002961 y: 0.003929 
epoch: 236 [2018-02-19 11:43:47]
train loss: unary: 0.003685 pairwise: 0.002804 y: 0.003715 
epoch: 237 [2018-02-19 11:46:57]
train loss: unary: 0.003867 pairwise: 0.002945 y: 0.003904 
epoch: 238 [2018-02-19 11:50:07]
train loss: unary: 0.003787 pairwise: 0.002822 y: 0.003822 
epoch: 239 [2018-02-19 11:53:17]
test loss: unary: 0.028848 pairwise: 0.003838 y: 0.028907 

Visualize Graph


In [10]:
# x = Variable(torch.zeros(1,3,256,256))
# y = net(x.cuda())
# g = make_dot(y[-1])

In [11]:
# g.render('net-transition_scale_{}'.format(transition_scale))

In [ ]: