In [1]:
import os, glob, platform, datetime, random
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.utils.data as data_utils
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
from torch import functional as F
# import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms

import cv2
from PIL import Image
from tensorboardX import SummaryWriter

import numpy as np
from numpy.linalg import inv as denseinv
from scipy import sparse
from scipy.sparse import lil_matrix, csr_matrix
from scipy.sparse.linalg import spsolve
from scipy.sparse.linalg import inv as spinv
import scipy.misc

from myimagefoldershading import MyImageFolder
from mymodel import GradientNet
from myargs import Args
from myutils import MyUtils

Configurations


In [2]:
# ---------------------------------------------------------------------------
# Global configuration: hyper-parameters, the held-out test scene, per-machine
# dataset locations, and GPU selection.
# ---------------------------------------------------------------------------
myutils = MyUtils()

args = Args()
args.arch = "densenet121"
args.epoches = 500                    # NOTE: overridden to 240 in a later cell
args.epoches_unary_threshold = 0
args.image_h = 256
args.image_w = 256
args.img_extentions = ["png"]         # (sic) spelling matches MyImageFolder's kwarg
args.training_thresholds = [250, 200, 150, 50, 0, 300]
args.base_lr = 1
args.lr = args.base_lr
args.snapshot_interval = 5000
args.debug = True


# growth_rate = (4*(2**(args.gpu_num)))
transition_scale = 2
pretrained_scale = 4
growth_rate = 32

#######
# args.test_scene = ['alley_2', 'bamboo_2', 'bandage_2', 'cave_4', 'market_5', 'mountain_1', 'shaman_3', 'sleeping_2', 'temple_3']
args.test_scene = 'bandage_1'
gradient = True        # supervise in the gradient domain instead of raw RGB
args.gpu_num = 1
#######

# TensorBoard run tag encodes the held-out scene and rgb/gradient mode.
if gradient:
    writer_comment = '{}_gd_shading'.format(args.test_scene)
else:
    writer_comment = '{}_rgb_shading'.format(args.test_scene)

# Offset added to predictions before writing PNG snapshots; gradient targets
# presumably live in [-0.5, 0.5] -- TODO confirm against myutils.processGt.
offset = 0.5 if gradient else 0.

args.display_interval = 50
args.display_curindex = 0

# Pick the dataset location for the current machine.
# NOTE(review): platform.dist() is deprecated and removed in Python 3.8+;
# migrate to another detection mechanism before upgrading the interpreter.
system_ = platform.system()
dist_info = platform.dist()
system_dist, system_version, _ = dist_info
if system_ == "Darwin":
    args.train_dir = '/Volumes/Transcend/dataset/sintel2'
    args.pretrained = False
elif dist_info == ('debian', 'jessie/sid', ''):
    args.train_dir = '/home/lwp/workspace/sintel2'
    args.pretrained = True
elif dist_info == ('debian', 'stretch/sid', ''):
    args.train_dir = '/home/cad/lwp/workspace/dataset/sintel2'
    args.pretrained = True

# Only the Linux training boxes have CUDA GPUs.
use_gpu = platform.system() == 'Linux'
if use_gpu:
    torch.cuda.set_device(args.gpu_num)


print(dist_info)


('debian', 'jessie/sid', '')

My DataLoader


In [3]:
# Build the train/test datasets; the held-out scene name is forwarded to
# MyImageFolder via test_scene.
train_transform = transforms.Compose([transforms.ToTensor()])
test_transform = transforms.Compose([
    transforms.CenterCrop((args.image_h, args.image_w)),
    transforms.ToTensor(),
])

train_dataset = MyImageFolder(
    args.train_dir, 'train', train_transform, random_crop=True,
    img_extentions=args.img_extentions, test_scene=args.test_scene,
    image_h=args.image_h, image_w=args.image_w)
test_dataset = MyImageFolder(
    args.train_dir, 'test', test_transform, random_crop=False,
    img_extentions=args.img_extentions, test_scene=args.test_scene,
    image_h=args.image_h, image_w=args.image_w)

# Positional args: batch_size=1, shuffle=True.
# NOTE(review): the *test* loader is shuffled as well -- presumably so the
# snapshot image dumped for the first test batch varies; confirm intentional.
train_loader = data_utils.DataLoader(train_dataset, 1, True, num_workers=1)
test_loader = data_utils.DataLoader(test_dataset, 1, True, num_workers=1)

Load Pretrained Model

Definition

  • DenseNet-121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16)
    • First Convolution: 32M -> 16M -> 8M
    • every transition: 8M -> 4M -> 2M (downsample 1/2, except the last block)

In [4]:
# Fetch the (optionally ImageNet-pretrained) DenseNet backbone and freeze all
# of its weights: it serves purely as a fixed feature extractor.
densenet = models.__dict__[args.arch](pretrained=args.pretrained)

for frozen_param in densenet.parameters():
    frozen_param.requires_grad = False

if use_gpu:
    densenet.cuda()

In [5]:
ss = 6        # epochs per stage of the staged-merge schedule
s0 = ss * 5   # epochs of per-scale-only training before merging starts
# s0 = 2

args.display_curindex = 0
args.base_lr = 0.05
args.display_interval = 20
args.momentum = 0.9
args.epoches = 240
args.training_thresholds = [0, 0, 0, 0, 0, s0]
# Epochs at which each merge stage is unlocked (index 5 = final '32M' stage).
args.training_merge_thresholds = [s0 + ss * 9, s0 + ss * 6, s0 + ss * 3,
                                  s0, -1, s0 + ss * 12]
args.power = 0.5



# pretrained = PreTrainedModel(densenet)
# if use_gpu: 
#     pretrained.cuda()


net = GradientNet(densenet=densenet, growth_rate=growth_rate,
                  transition_scale=transition_scale,
                  pretrained_scale=pretrained_scale,
                  gradient=gradient)
if use_gpu:
    net.cuda()


def _mse_bank():
    """Six references to one shared MSELoss (stateless, so sharing is safe)."""
    criterion = nn.MSELoss()
    if use_gpu:
        criterion = criterion.cuda()
    return [criterion] * 6

mse_losses = _mse_bank()
test_losses = _mse_bank()
mse_merge_losses = _mse_bank()
test_merge_losses = _mse_bank()


_ ConvTranspose2d weight 0.002867696673382022
_ ConvTranspose2d weight 0.002867696673382022
_ ConvTranspose2d weight 0.003031695312954162
_ ConvTranspose2d weight 0.003031695312954162
_ ConvTranspose2d weight 0.004419417382415922

In [6]:
def test_model(epoch, go_through_merge=False, phase='train'):
    """Run the net over the held-out loader and log average losses.

    Args:
        epoch: current training epoch; used as the TensorBoard global step
            and to skip scales whose training has not started yet.
        go_through_merge: False, or one of '32M'/'16M'/'08M'/'04M'/'02M'
            naming how deep the merge branch currently runs.
        phase: 'train' evaluates with net.train() (BatchNorm/Dropout in
            training behaviour), anything else uses net.eval(); also
            embedded in the TensorBoard series names.

    Side effects: writes snapshot PNGs for the first batch and scalar
    summaries to the global `writer`.
    """
    if phase == 'train': net.train()
    else: net.eval()

    def merge_active(stage, i):
        # Which merged outputs are supervised at a given merge stage.
        # Output 4 is never merged; output 5 only at the final '32M' stage.
        if stage == False or i == 4:
            return False
        if stage == '32M':
            return True
        min_i = {'16M': 0, '08M': 1, '04M': 2, '02M': 3}.get(stage)
        return min_i is not None and i != 5 and i >= min_i

    n_scales = len(args.training_thresholds)
    test_losses_trainphase = [0] * n_scales
    test_cnts_trainphase = [0.00001] * n_scales    # epsilon: avoid div-by-zero
    test_merge_losses_trainphase = [0] * n_scales
    test_merge_cnts_trainphase = [0.00001] * n_scales

    for ind, data in enumerate(test_loader, 0):
        input_img, gt_albedo, gt_shading, test_scene, img_path = data
        input_img = Variable(input_img)
        gt_albedo = Variable(gt_albedo)
        gt_shading = Variable(gt_shading)
        if use_gpu:
            input_img = input_img.cuda(args.gpu_num)

        ft_test, merged_RGB = net(input_img, go_through_merge=go_through_merge)

        for i, v in enumerate(ft_test):
            if epoch < args.training_thresholds[i]: continue
            # Output i predicts at 1/2**(i+1) scale; output 5 is full resolution.
            if i == 5: s = 1
            else: s = (2**(i+1))
            gt0 = gt_albedo.cpu().data.numpy()
            n, c, h, w = gt0.shape
            gt, display = myutils.processGt(gt0, scale_factor=s, gd=gradient, return_image=True)
            gt_mg, display_mg = myutils.processGt(gt0, scale_factor=s//2, gd=gradient, return_image=True)

            if use_gpu:
                gt = gt.cuda()
                gt_mg = gt_mg.cuda()

            if i != 5:
                loss = mse_losses[i](ft_test[i], gt)
                test_losses_trainphase[i] += loss.data.cpu().numpy()[0]
                test_cnts_trainphase[i] += 1

            if merge_active(go_through_merge, i):
                # Merged outputs are one scale finer, except the final one.
                if i == 5: gt2 = gt
                else: gt2 = gt_mg
                loss = mse_merge_losses[i](merged_RGB[i], gt2)
                test_merge_losses_trainphase[i] += loss.data.cpu().numpy()[0]
                test_merge_cnts_trainphase[i] += 1

            if ind == 0:
                # Dump prediction snapshots for the first test batch only.
                if i != 5:
                    v = v[0].cpu().data.numpy()
                    v = v.transpose(1, 2, 0)
                    v = v[:, :, 0:3]
                    cv2.imwrite('snapshot{}/test-phase_{}-{}-{}.png'.format(args.gpu_num, phase, epoch, i), (v[:, :, ::-1]+offset)*255)
                if merge_active(go_through_merge, i):
                    v = merged_RGB[i][0].cpu().data.numpy()
                    v = v.transpose(1, 2, 0)
                    v = v[:, :, 0:3]
                    cv2.imwrite('snapshot{}/test-mg-phase_{}-{}-{}.png'.format(args.gpu_num, phase, epoch, i), (v[:, :, ::-1]+offset)*255)

    # Log averaged losses.  BUGFIX: the 2M series previously omitted the
    # phase suffix ('test 2M '), merging train- and test-phase curves into
    # one TensorBoard series; it now matches its siblings.
    scale_tags = ['16M', '8M', '4M', '2M', '1M', 'merged']
    for losses, cnts, prefix in (
            (test_losses_trainphase, test_cnts_trainphase, 'test'),
            (test_merge_losses_trainphase, test_merge_cnts_trainphase, 'mg test')):
        for i, tag in enumerate(scale_tags):
            writer.add_scalars(
                '{} loss'.format(tag),
                {'{} {} phase {}'.format(prefix, tag, phase): np.array([losses[i] / cnts[i]])},
                global_step=epoch)

In [7]:
# TensorBoard writer plus SGD over only the trainable (non-frozen) weights.

writer = SummaryWriter(comment='-{}'.format(writer_comment))

parameters = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.SGD(parameters, lr=args.base_lr, momentum=args.momentum)

def adjust_learning_rate(optimizer, epoch, beg, end, reset_lr=None, base_lr=args.base_lr):
    """Polynomial ("poly") learning-rate decay over the window [beg, end].

    Sets every param group's lr to
        base_lr * ((end - epoch) / (end - beg)) ** args.power,
    floored at 1e-8.  If `reset_lr` is given, every group is simply set to
    that value instead.  (Previous docstring described a step decay, which
    was wrong.)

    Note: the `base_lr` default captures args.base_lr at definition time;
    later changes to args.base_lr do not alter the default.
    """
    for param_group in optimizer.param_groups:
        if reset_lr is not None:
            param_group['lr'] = reset_lr
            continue
        param_group['lr'] = base_lr * (float(end-epoch)/(end-beg)) ** (args.power)
        if param_group['lr'] < 1.0e-8: param_group['lr'] = 1.0e-8


# Main training loop: per-epoch poly LR schedule, staged enabling of the
# merge branch, per-scale losses with immediate backward passes, periodic
# snapshot images, checkpoints and TensorBoard summaries.
for epoch in range(args.epoches):
#     epoch = 234
    net.train()
    print('epoch: {} [{}]'.format(epoch, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

    # LR schedule: one decay ramp during the warm-up phase (< s0), a ramp
    # restarting every `ss` epochs while merge stages unlock, then a final
    # ramp to the end of training.
    if epoch < args.training_thresholds[-1]: 
        adjust_learning_rate(optimizer, epoch, beg=0, end=s0-1)
    elif epoch < args.training_merge_thresholds[-1]:
        adjust_learning_rate(optimizer, (epoch-s0)%(ss), beg=0, end=ss-1, base_lr=args.base_lr)
    else:
        adjust_learning_rate(optimizer, epoch, beg=args.training_merge_thresholds[-1], end=args.epoches-1, base_lr=args.base_lr)  
        
        
    # Select the current merge stage, coarsest threshold first ('32M'
    # supersedes '16M', and so on down to '02M').
    if epoch < args.training_thresholds[-1]: go_through_merge = False
    elif epoch >= args.training_merge_thresholds[5]: go_through_merge = '32M'
    elif epoch >= args.training_merge_thresholds[0]: go_through_merge = '16M'
    elif epoch >= args.training_merge_thresholds[1]: go_through_merge = '08M'
    elif epoch >= args.training_merge_thresholds[2]: go_through_merge = '04M'
    elif epoch >= args.training_merge_thresholds[3]: go_through_merge = '02M'

    # Per-epoch running loss sums and counts (epsilon avoids div-by-zero
    # for scales that received no updates this epoch).
    run_losses = [0] * len(args.training_thresholds)
    run_cnts   = [0.00001] * len(args.training_thresholds)
    run_merge_losses = [0] * len(args.training_thresholds)
    run_merge_cnts   = [0.00001] * len(args.training_thresholds)
    # Reset the LR to base whenever a new scale or merge stage unlocks.
    if (epoch in args.training_thresholds) == True: 
        adjust_learning_rate(optimizer, epoch, reset_lr=args.base_lr, beg=-1, end=-1)
    if (epoch in args.training_merge_thresholds) == True:
        adjust_learning_rate(optimizer, epoch, reset_lr=args.base_lr, beg=-1, end=-1)
        
    writer.add_scalar('learning rate', optimizer.param_groups[0]['lr'], global_step=epoch)
    for ind, data in enumerate(train_loader, 0):
#         if  ind == 1 : break
        """prepare  training data"""
        # im: HWC BGR copy of the input for snapshot dumping via OpenCV.
        input_img, gt_albedo, gt_shading, test_scene, img_path = data
        im = input_img[0,:,:,:].numpy(); im = im.transpose(1,2,0); im = im[:,:,::-1]*255
        input_img, gt_albedo, gt_shading = Variable(input_img), Variable(gt_albedo), Variable(gt_shading)
        if use_gpu: input_img, gt_albedo, gt_shading = input_img.cuda(), gt_albedo.cuda(), gt_shading.cuda()

        if args.display_curindex % args.display_interval == 0: cv2.imwrite('snapshot{}/input.png'.format(args.gpu_num), im)

        optimizer.zero_grad()
        
            
        ft_predict, merged_RGB = net(input_img, go_through_merge=go_through_merge)
        for i, threshold in enumerate(args.training_thresholds):
            if epoch >= threshold:
#             if epoch >= 0:
                """prepare resized gt"""
                # Output i is supervised at 1/2**(i+1) scale; i == 5 is the
                # full-resolution output.
                if i == 5: s = 1
                else: s = (2**(i+1))
                gt0 = gt_albedo.cpu().data.numpy()
                n,c,h,w = gt0.shape
                gt, display = myutils.processGt(gt0, scale_factor=s, gd=gradient, return_image=True)
                gt_mg, display_mg = myutils.processGt(gt0, scale_factor=s//2, gd=gradient, return_image=True)
                if use_gpu: 
                    gt = gt.cuda()
                    gt_mg = gt_mg.cuda()
                if args.display_curindex % args.display_interval == 0:
                    display = display[:,:,0:3]
                    cv2.imwrite('snapshot{}/gt-{}-{}.png'.format(args.gpu_num, epoch, i), display[:,:,::-1]*255)                
                
                """compute loss"""
                # Each loss is backpropagated immediately; retain_graph keeps
                # the shared graph alive for the remaining scales' backwards.
                if i != 5: 
                    loss = mse_losses[i](ft_predict[i], gt)
                    run_losses[i] += loss.data.cpu().numpy()[0]
                    loss.backward(retain_graph=True)
                    run_cnts[i] += 1
                
                # Merge-branch supervision: output 4 never merges; output 5
                # only at the final '32M' stage; the others unlock stage by
                # stage as go_through_merge coarsens.
                if go_through_merge != False and i != 4:
                    if ((go_through_merge == '32M') or
                    (go_through_merge == '16M' and i != 5) or  
                    (go_through_merge == '08M' and i != 5 and i > 0) or
                    (go_through_merge == '04M' and i != 5 and i > 1) or
                    (go_through_merge == '02M' and i != 5 and i > 2)):
#                         print(epoch, go_through_merge, i)
                        
#                         print (merged_RGB[i].cpu().data.numpy().max(), merged_RGB[i].cpu().data.numpy().min())
                        # Merged outputs are one scale finer than ft_predict[i],
                        # except the final full-resolution one.
                        if i==5: gt2=gt
                        else: gt2=gt_mg
#                         print(i)
#                         print('merge size', merged_RGB[i].size())
#                         print('gt2 size', gt2.size())
                        loss = mse_merge_losses[i](merged_RGB[i], gt2)
                        run_merge_losses[i] += loss.data.cpu().numpy()[0]
                        loss.backward(retain_graph=True)
                        run_merge_cnts[i] += 1
                
                """save training image"""
                if args.display_curindex % args.display_interval == 0:
                    
                    if i != 5:
                        im = (ft_predict[i].cpu().data.numpy()[0].transpose((1,2,0))+offset) * 255
                        im = im[:,:,0:3]
                        
                        cv2.imwrite('snapshot{}/train-{}-{}.png'.format(args.gpu_num, epoch, i), im[:,:,::-1])
                    
                    if go_through_merge != False and i != 4:
                        if ((go_through_merge == '32M') or
                        (go_through_merge == '16M' and i != 5) or  
                        (go_through_merge == '08M' and i != 5 and i > 0) or
                        (go_through_merge == '04M' and i != 5 and i > 1) or
                        (go_through_merge == '02M' and i != 5 and i > 2)):
                            im = (merged_RGB[i].cpu().data.numpy()[0].transpose((1,2,0))+offset) * 255
                            im = im[:,:,0:3]
                            cv2.imwrite('snapshot{}/train-mg-{}-{}.png'.format(args.gpu_num, epoch, i), im[:,:,::-1])
        # One optimizer step per batch, accumulating all per-scale gradients.
        optimizer.step()
        args.display_curindex += 1

    """ every epoch """
#     loss_output = 'ind: ' + str(args.display_curindex)
    # Console summary of mean per-scale losses for this epoch.
    loss_output = ''
    
    
    
    for i,v in enumerate(run_losses):
        if i == len(run_losses)-1: 
            loss_output += ' merged: %6f' % (run_losses[i] / run_cnts[i])
            continue
        loss_output += ' %2dM: %6f' % ((2**(4-i)), (run_losses[i] / run_cnts[i]))
    print(loss_output)
    loss_output = ''
    for i,v in enumerate(run_merge_losses):
        if i == len(run_merge_losses)-1: 
            loss_output += 'mg merged: %6f' % (run_merge_losses[i] / run_merge_cnts[i])
            continue
        loss_output += ' mg %2dM: %6f' % ((2**(4-i)), (run_merge_losses[i] / run_merge_cnts[i]))
    print(loss_output)
    
    """save at every epoch"""
    # Checkpoint every 10 epochs (weights + optimizer + args snapshot).
    if (epoch+1) % 10 == 0:
        torch.save({
            'epoch': epoch,
            'args' : args,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict()
        }, 'snapshot{}/snapshot-{}.pth.tar'.format(args.gpu_num, epoch))
    
    # test 
    # Every 5 epochs: evaluate in both BN modes and log training losses.
    if (epoch+1) % 5 == 0:
        test_model(epoch, phase='train', go_through_merge=go_through_merge)
        test_model(epoch, phase='test', go_through_merge=go_through_merge)

        writer.add_scalars('16M loss', {'train 16M ': np.array([run_losses[0]/ run_cnts[0]])}, global_step=epoch)  
        writer.add_scalars('8M loss', {'train 8M ': np.array([run_losses[1]/ run_cnts[1]])}, global_step=epoch) 
        writer.add_scalars('4M loss', {'train 4M ': np.array([run_losses[2]/ run_cnts[2]])}, global_step=epoch) 
        writer.add_scalars('2M loss', {'train 2M ': np.array([run_losses[3]/ run_cnts[3]])}, global_step=epoch) 
        writer.add_scalars('1M loss', {'train 1M ': np.array([run_losses[4]/ run_cnts[4]])}, global_step=epoch) 
        writer.add_scalars('merged loss', {'train merged ': np.array([run_losses[5]/ run_cnts[5]])}, global_step=epoch)


epoch: 0 [2018-01-03 00:24:14]
 16M: 0.003325  8M: 0.004966  4M: 0.006258  2M: 0.008486  1M: 0.010270 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 1 [2018-01-03 00:25:36]
 16M: 0.002436  8M: 0.003694  4M: 0.004514  2M: 0.006055  1M: 0.007109 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 2 [2018-01-03 00:26:57]
 16M: 0.002260  8M: 0.003362  4M: 0.004033  2M: 0.005261  1M: 0.006207 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 3 [2018-01-03 00:28:20]
 16M: 0.002174  8M: 0.003211  4M: 0.003758  2M: 0.004790  1M: 0.005613 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 4 [2018-01-03 00:29:40]
 16M: 0.002097  8M: 0.003046  4M: 0.003495  2M: 0.004310  1M: 0.005012 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 5 [2018-01-03 00:31:04]
 16M: 0.002051  8M: 0.002950  4M: 0.003331  2M: 0.003979  1M: 0.004579 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 6 [2018-01-03 00:32:24]
 16M: 0.002031  8M: 0.002894  4M: 0.003221  2M: 0.003847  1M: 0.004462 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 7 [2018-01-03 00:33:44]
 16M: 0.001952  8M: 0.002796  4M: 0.003110  2M: 0.003661  1M: 0.004274 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 8 [2018-01-03 00:35:04]
 16M: 0.002001  8M: 0.002814  4M: 0.003050  2M: 0.003542  1M: 0.004065 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 9 [2018-01-03 00:36:25]
 16M: 0.001922  8M: 0.002693  4M: 0.002884  2M: 0.003331  1M: 0.003825 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 10 [2018-01-03 00:37:50]
 16M: 0.001964  8M: 0.002712  4M: 0.002872  2M: 0.003212  1M: 0.003643 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 11 [2018-01-03 00:39:11]
 16M: 0.001881  8M: 0.002589  4M: 0.002697  2M: 0.003019  1M: 0.003520 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 12 [2018-01-03 00:40:30]
 16M: 0.001847  8M: 0.002538  4M: 0.002648  2M: 0.002954  1M: 0.003350 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 13 [2018-01-03 00:41:51]
 16M: 0.001887  8M: 0.002577  4M: 0.002635  2M: 0.002901  1M: 0.003270 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 14 [2018-01-03 00:43:11]
 16M: 0.001832  8M: 0.002485  4M: 0.002562  2M: 0.002824  1M: 0.003183 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 15 [2018-01-03 00:44:36]
 16M: 0.001867  8M: 0.002534  4M: 0.002559  2M: 0.002812  1M: 0.003140 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 16 [2018-01-03 00:45:56]
 16M: 0.001786  8M: 0.002415  4M: 0.002448  2M: 0.002687  1M: 0.003000 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 17 [2018-01-03 00:47:16]
 16M: 0.001750  8M: 0.002358  4M: 0.002375  2M: 0.002600  1M: 0.002921 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 18 [2018-01-03 00:48:37]
 16M: 0.001791  8M: 0.002393  4M: 0.002388  2M: 0.002578  1M: 0.002831 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 19 [2018-01-03 00:49:58]
 16M: 0.001799  8M: 0.002395  4M: 0.002411  2M: 0.002578  1M: 0.002798 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 20 [2018-01-03 00:51:24]
 16M: 0.001746  8M: 0.002321  4M: 0.002296  2M: 0.002488  1M: 0.002777 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 21 [2018-01-03 00:52:45]
 16M: 0.001836  8M: 0.002414  4M: 0.002355  2M: 0.002519  1M: 0.002754 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 22 [2018-01-03 00:54:08]
 16M: 0.001755  8M: 0.002309  4M: 0.002255  2M: 0.002408  1M: 0.002586 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 23 [2018-01-03 00:55:30]
 16M: 0.001777  8M: 0.002329  4M: 0.002257  2M: 0.002435  1M: 0.002599 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 24 [2018-01-03 00:56:50]
 16M: 0.001759  8M: 0.002323  4M: 0.002256  2M: 0.002412  1M: 0.002603 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 25 [2018-01-03 00:58:14]
 16M: 0.001711  8M: 0.002233  4M: 0.002169  2M: 0.002324  1M: 0.002502 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 26 [2018-01-03 00:59:35]
 16M: 0.001704  8M: 0.002241  4M: 0.002175  2M: 0.002300  1M: 0.002514 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 27 [2018-01-03 01:00:56]
 16M: 0.001738  8M: 0.002258  4M: 0.002135  2M: 0.002276  1M: 0.002447 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 28 [2018-01-03 01:02:16]
 16M: 0.001665  8M: 0.002189  4M: 0.002102  2M: 0.002235  1M: 0.002392 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 29 [2018-01-03 01:03:35]
 16M: 0.001717  8M: 0.002247  4M: 0.002196  2M: 0.002283  1M: 0.002436 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.000000 mg  1M: 0.000000mg merged: 0.000000
epoch: 30 [2018-01-03 01:05:01]
 16M: 0.001718  8M: 0.002252  4M: 0.002192  2M: 0.002401  1M: 0.002592 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.006867 mg  1M: 0.000000mg merged: 0.000000
epoch: 31 [2018-01-03 01:06:38]
 16M: 0.001710  8M: 0.002241  4M: 0.002203  2M: 0.002439  1M: 0.002605 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005978 mg  1M: 0.000000mg merged: 0.000000
epoch: 32 [2018-01-03 01:08:15]
 16M: 0.001739  8M: 0.002247  4M: 0.002164  2M: 0.002363  1M: 0.002508 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005644 mg  1M: 0.000000mg merged: 0.000000
epoch: 33 [2018-01-03 01:09:54]
 16M: 0.001680  8M: 0.002165  4M: 0.002065  2M: 0.002224  1M: 0.002385 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005275 mg  1M: 0.000000mg merged: 0.000000
epoch: 34 [2018-01-03 01:11:31]
 16M: 0.001686  8M: 0.002169  4M: 0.002096  2M: 0.002245  1M: 0.002363 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005080 mg  1M: 0.000000mg merged: 0.000000
epoch: 35 [2018-01-03 01:13:15]
 16M: 0.001702  8M: 0.002210  4M: 0.002124  2M: 0.002244  1M: 0.002382 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005128 mg  1M: 0.000000mg merged: 0.000000
epoch: 36 [2018-01-03 01:14:53]
 16M: 0.001699  8M: 0.002181  4M: 0.002104  2M: 0.002322  1M: 0.002441 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.005113 mg  1M: 0.000000mg merged: 0.000000
epoch: 37 [2018-01-03 01:16:30]
 16M: 0.001661  8M: 0.002126  4M: 0.002030  2M: 0.002233  1M: 0.002427 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004809 mg  1M: 0.000000mg merged: 0.000000
epoch: 38 [2018-01-03 01:18:09]
 16M: 0.001664  8M: 0.002123  4M: 0.002033  2M: 0.002197  1M: 0.002378 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004683 mg  1M: 0.000000mg merged: 0.000000
epoch: 39 [2018-01-03 01:19:47]
 16M: 0.001671  8M: 0.002123  4M: 0.002019  2M: 0.002147  1M: 0.002316 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004503 mg  1M: 0.000000mg merged: 0.000000
epoch: 40 [2018-01-03 01:21:32]
 16M: 0.001622  8M: 0.002086  4M: 0.001969  2M: 0.002105  1M: 0.002293 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004423 mg  1M: 0.000000mg merged: 0.000000
epoch: 41 [2018-01-03 01:23:11]
 16M: 0.001630  8M: 0.002084  4M: 0.001969  2M: 0.002106  1M: 0.002208 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004350 mg  1M: 0.000000mg merged: 0.000000
epoch: 42 [2018-01-03 01:24:48]
 16M: 0.001672  8M: 0.002117  4M: 0.002030  2M: 0.002214  1M: 0.002339 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004382 mg  1M: 0.000000mg merged: 0.000000
epoch: 43 [2018-01-03 01:26:25]
 16M: 0.001614  8M: 0.002042  4M: 0.001929  2M: 0.002092  1M: 0.002271 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004235 mg  1M: 0.000000mg merged: 0.000000
epoch: 44 [2018-01-03 01:28:03]
 16M: 0.001657  8M: 0.002096  4M: 0.001975  2M: 0.002112  1M: 0.002275 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.004218 mg  1M: 0.000000mg merged: 0.000000
epoch: 45 [2018-01-03 01:29:45]
 16M: 0.001586  8M: 0.001988  4M: 0.001865  2M: 0.001999  1M: 0.002122 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.003941 mg  1M: 0.000000mg merged: 0.000000
epoch: 46 [2018-01-03 01:31:22]
 16M: 0.001576  8M: 0.001987  4M: 0.001871  2M: 0.001980  1M: 0.002111 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.003873 mg  1M: 0.000000mg merged: 0.000000
epoch: 47 [2018-01-03 01:32:59]
 16M: 0.001566  8M: 0.001958  4M: 0.001859  2M: 0.001965  1M: 0.002045 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.000000 mg  2M: 0.003888 mg  1M: 0.000000mg merged: 0.000000
epoch: 48 [2018-01-03 01:34:36]
 16M: 0.001607  8M: 0.002026  4M: 0.001916  2M: 0.002051  1M: 0.002182 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.004502 mg  2M: 0.003947 mg  1M: 0.000000mg merged: 0.000000
epoch: 49 [2018-01-03 01:36:35]
 16M: 0.001587  8M: 0.002008  4M: 0.001891  2M: 0.002017  1M: 0.002202 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003818 mg  2M: 0.003974 mg  1M: 0.000000mg merged: 0.000000
epoch: 50 [2018-01-03 01:38:42]
 16M: 0.001592  8M: 0.001988  4M: 0.001855  2M: 0.001973  1M: 0.002100 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003581 mg  2M: 0.003822 mg  1M: 0.000000mg merged: 0.000000
epoch: 51 [2018-01-03 01:40:40]
 16M: 0.001618  8M: 0.001995  4M: 0.001853  2M: 0.001923  1M: 0.002046 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003444 mg  2M: 0.003752 mg  1M: 0.000000mg merged: 0.000000
epoch: 52 [2018-01-03 01:42:36]
 16M: 0.001561  8M: 0.001948  4M: 0.001815  2M: 0.001888  1M: 0.002008 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003321 mg  2M: 0.003672 mg  1M: 0.000000mg merged: 0.000000
epoch: 53 [2018-01-03 01:44:36]
 16M: 0.001569  8M: 0.001946  4M: 0.001800  2M: 0.001890  1M: 0.001991 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003248 mg  2M: 0.003655 mg  1M: 0.000000mg merged: 0.000000
epoch: 54 [2018-01-03 01:46:36]
 16M: 0.001618  8M: 0.002005  4M: 0.001868  2M: 0.001954  1M: 0.002078 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003318 mg  2M: 0.003759 mg  1M: 0.000000mg merged: 0.000000
epoch: 55 [2018-01-03 01:48:38]
 16M: 0.001619  8M: 0.002003  4M: 0.001876  2M: 0.001972  1M: 0.002036 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003169 mg  2M: 0.003742 mg  1M: 0.000000mg merged: 0.000000
epoch: 56 [2018-01-03 01:50:37]
 16M: 0.001543  8M: 0.001924  4M: 0.001801  2M: 0.001908  1M: 0.002027 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.003016 mg  2M: 0.003595 mg  1M: 0.000000mg merged: 0.000000
epoch: 57 [2018-01-03 01:52:38]
 16M: 0.001576  8M: 0.001953  4M: 0.001798  2M: 0.001890  1M: 0.001987 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002975 mg  2M: 0.003554 mg  1M: 0.000000mg merged: 0.000000
epoch: 58 [2018-01-03 01:54:37]
 16M: 0.001579  8M: 0.001941  4M: 0.001782  2M: 0.001840  1M: 0.001886 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002930 mg  2M: 0.003542 mg  1M: 0.000000mg merged: 0.000000
epoch: 59 [2018-01-03 01:56:37]
 16M: 0.001550  8M: 0.001896  4M: 0.001721  2M: 0.001799  1M: 0.001880 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002868 mg  2M: 0.003460 mg  1M: 0.000000mg merged: 0.000000
epoch: 60 [2018-01-03 01:58:43]
 16M: 0.001563  8M: 0.001926  4M: 0.001798  2M: 0.001892  1M: 0.001976 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002853 mg  2M: 0.003492 mg  1M: 0.000000mg merged: 0.000000
epoch: 61 [2018-01-03 02:00:44]
 16M: 0.001516  8M: 0.001870  4M: 0.001750  2M: 0.001840  1M: 0.001963 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002779 mg  2M: 0.003450 mg  1M: 0.000000mg merged: 0.000000
epoch: 62 [2018-01-03 02:02:42]
 16M: 0.001524  8M: 0.001877  4M: 0.001738  2M: 0.001809  1M: 0.001924 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002711 mg  2M: 0.003375 mg  1M: 0.000000mg merged: 0.000000
epoch: 63 [2018-01-03 02:04:41]
 16M: 0.001547  8M: 0.001886  4M: 0.001709  2M: 0.001797  1M: 0.001883 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002721 mg  2M: 0.003375 mg  1M: 0.000000mg merged: 0.000000
epoch: 64 [2018-01-03 02:06:44]
 16M: 0.001568  8M: 0.001908  4M: 0.001733  2M: 0.001795  1M: 0.001900 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002656 mg  2M: 0.003332 mg  1M: 0.000000mg merged: 0.000000
epoch: 65 [2018-01-03 02:08:53]
 16M: 0.001475  8M: 0.001810  4M: 0.001650  2M: 0.001716  1M: 0.001780 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.000000 mg  4M: 0.002587 mg  2M: 0.003270 mg  1M: 0.000000mg merged: 0.000000
epoch: 66 [2018-01-03 02:10:51]
 16M: 0.001551  8M: 0.001912  4M: 0.001749  2M: 0.001828  1M: 0.001941 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.003183 mg  4M: 0.002644 mg  2M: 0.003357 mg  1M: 0.000000mg merged: 0.000000
epoch: 67 [2018-01-03 02:13:28]
 16M: 0.001538  8M: 0.001894  4M: 0.001710  2M: 0.001796  1M: 0.001876 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002574 mg  4M: 0.002580 mg  2M: 0.003338 mg  1M: 0.000000mg merged: 0.000000
epoch: 68 [2018-01-03 02:16:03]
 16M: 0.001516  8M: 0.001853  4M: 0.001711  2M: 0.001751  1M: 0.001859 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002451 mg  4M: 0.002554 mg  2M: 0.003295 mg  1M: 0.000000mg merged: 0.000000
epoch: 69 [2018-01-03 02:18:39]
 16M: 0.001507  8M: 0.001848  4M: 0.001667  2M: 0.001746  1M: 0.001865 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002389 mg  4M: 0.002505 mg  2M: 0.003254 mg  1M: 0.000000mg merged: 0.000000
epoch: 70 [2018-01-03 02:21:18]
 16M: 0.001512  8M: 0.001849  4M: 0.001647  2M: 0.001700  1M: 0.001796 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002376 mg  4M: 0.002505 mg  2M: 0.003247 mg  1M: 0.000000mg merged: 0.000000
epoch: 71 [2018-01-03 02:23:48]
 16M: 0.001505  8M: 0.001820  4M: 0.001620  2M: 0.001679  1M: 0.001733 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002357 mg  4M: 0.002471 mg  2M: 0.003202 mg  1M: 0.000000mg merged: 0.000000
epoch: 72 [2018-01-03 02:26:16]
 16M: 0.001532  8M: 0.001858  4M: 0.001660  2M: 0.001727  1M: 0.001818 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002362 mg  4M: 0.002499 mg  2M: 0.003246 mg  1M: 0.000000mg merged: 0.000000
epoch: 73 [2018-01-03 02:28:49]
 16M: 0.001461  8M: 0.001795  4M: 0.001628  2M: 0.001697  1M: 0.001792 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002194 mg  4M: 0.002396 mg  2M: 0.003168 mg  1M: 0.000000mg merged: 0.000000
epoch: 74 [2018-01-03 02:31:26]
 16M: 0.001489  8M: 0.001816  4M: 0.001635  2M: 0.001692  1M: 0.001762 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002216 mg  4M: 0.002410 mg  2M: 0.003178 mg  1M: 0.000000mg merged: 0.000000
epoch: 75 [2018-01-03 02:34:07]
 16M: 0.001467  8M: 0.001765  4M: 0.001563  2M: 0.001634  1M: 0.001733 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002181 mg  4M: 0.002377 mg  2M: 0.003123 mg  1M: 0.000000mg merged: 0.000000
epoch: 76 [2018-01-03 02:36:38]
 16M: 0.001498  8M: 0.001797  4M: 0.001595  2M: 0.001652  1M: 0.001735 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002123 mg  4M: 0.002332 mg  2M: 0.003100 mg  1M: 0.000000mg merged: 0.000000
epoch: 77 [2018-01-03 02:39:07]
 16M: 0.001478  8M: 0.001784  4M: 0.001562  2M: 0.001622  1M: 0.001683 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002153 mg  4M: 0.002350 mg  2M: 0.003094 mg  1M: 0.000000mg merged: 0.000000
epoch: 78 [2018-01-03 02:41:36]
 16M: 0.001456  8M: 0.001779  4M: 0.001592  2M: 0.001648  1M: 0.001746 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002076 mg  4M: 0.002313 mg  2M: 0.003080 mg  1M: 0.000000mg merged: 0.000000
epoch: 79 [2018-01-03 02:44:08]
 16M: 0.001494  8M: 0.001819  4M: 0.001640  2M: 0.001707  1M: 0.001778 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002038 mg  4M: 0.002300 mg  2M: 0.003083 mg  1M: 0.000000mg merged: 0.000000
epoch: 80 [2018-01-03 02:46:41]
 16M: 0.001483  8M: 0.001770  4M: 0.001552  2M: 0.001611  1M: 0.001686 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002047 mg  4M: 0.002280 mg  2M: 0.003028 mg  1M: 0.000000mg merged: 0.000000
epoch: 81 [2018-01-03 02:49:11]
 16M: 0.001471  8M: 0.001758  4M: 0.001540  2M: 0.001581  1M: 0.001670 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.002021 mg  4M: 0.002258 mg  2M: 0.002991 mg  1M: 0.000000mg merged: 0.000000
epoch: 82 [2018-01-03 02:51:44]
 16M: 0.001433  8M: 0.001719  4M: 0.001516  2M: 0.001579  1M: 0.001643 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.001955 mg  4M: 0.002206 mg  2M: 0.002949 mg  1M: 0.000000mg merged: 0.000000
epoch: 83 [2018-01-03 02:54:16]
 16M: 0.001416  8M: 0.001709  4M: 0.001500  2M: 0.001552  1M: 0.001622 merged: 0.000000
 mg 16M: 0.000000 mg  8M: 0.001893 mg  4M: 0.002132 mg  2M: 0.002882 mg  1M: 0.000000mg merged: 0.000000
epoch: 84 [2018-01-03 02:56:45]
Process Process-117:
Traceback (most recent call last):
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/lwp/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 34, in _worker_loop
    r = index_queue.get()
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/queues.py", line 335, in get
    res = self._reader.recv_bytes()
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/connection.py", line 216, in recv_bytes
    buf = self._recv_bytes(maxlength)
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
    buf = self._recv(4)
  File "/home/lwp/anaconda3/lib/python3.6/multiprocessing/connection.py", line 379, in _recv
    chunk = read(handle, remaining)
KeyboardInterrupt
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-c4cb16df2fec> in <module>()
     99 #                         print('merge size', merged_RGB[i].size())
    100 #                         print('gt2 size', gt2.size())
--> 101                         loss = mse_merge_losses[i](merged_RGB[i], gt2)
    102                         run_merge_losses[i] += loss.data.cpu().numpy()[0]
    103                         loss.backward(retain_graph=True)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

~/anaconda3/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    270     def forward(self, input, target):
    271         _assert_no_grad(target)
--> 272         return F.mse_loss(input, target, size_average=self.size_average)
    273 
    274 

~/anaconda3/lib/python3.6/site-packages/torch/nn/functional.py in mse_loss(input, target, size_average)
    817 
    818 def mse_loss(input, target, size_average=True):
--> 819     return _functions.thnn.MSELoss.apply(input, target, size_average)
    820 
    821 

~/anaconda3/lib/python3.6/site-packages/torch/nn/_functions/thnn/auto.py in forward(ctx, input, target, *args)
     45         output = input.new(1)
     46         getattr(ctx._backend, update_output.name)(ctx._backend.library_state, input, target,
---> 47                                                   output, *ctx.additional_args)
     48         return output
     49 

KeyboardInterrupt: 

Visualize Graph


In [ ]:
from graphviz import Digraph
import torch
from torch.autograd import Variable


def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are the Variables that require grad, orange are Tensors
    saved for backward in torch.autograd.Function, grey nodes are the
    remaining autograd Function objects.

    Args:
        var: output Variable; the graph is walked starting from var.grad_fn.
        params: optional dict of (name, Variable); when given, parameter
            nodes are labeled with their names instead of being anonymous.

    Returns:
        graphviz.Digraph rendering the traversed graph (SVG format).
    """
    if params is not None:
        # BUGFIX: in Python 3, dict.values() returns a view that cannot be
        # indexed with [0]; take an arbitrary element via next(iter(...)).
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for k, v in params.items()}

    node_attr = dict(style='filled',
                     shape='box',
                     align='left',
                     fontsize='12',
                     ranksep='0.1',
                     height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size="10240,10240"), format='svg')
    seen = set()  # guards against revisiting shared subgraphs (graph is a DAG)

    def size_to_str(size):
        # e.g. torch.Size([1, 3, 256, 256]) -> "(1, 3, 256, 256)"
        return '(' + (', ').join(['%d' % v for v in size]) + ')'

    def add_nodes(var):
        if var not in seen:
            if torch.is_tensor(var):
                # Tensor saved for backward.
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                # AccumulateGrad-style node wrapping a leaf Variable
                # (typically a model parameter).
                u = var.variable
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                # Intermediate autograd Function; label with its class name.
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            # Recurse into the functions that produced this node's inputs.
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            # Also link in any tensors stashed for the backward pass.
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot

In [ ]:
# x = Variable(torch.zeros(1,3,256,256))
# y = net(x.cuda())
# g = make_dot(y[-1])

In [ ]:
# g.render('net-transition_scale_{}'.format(transition_scale))

In [ ]:


In [ ]: