In [1]:
import numpy as np
import cv2

In [4]:
# Progressive 5-level down/up-scaling of a 256x256 crop (blur via resampling).
im = cv2.imread('/home/lwp/workspace/sintel2/albedo/alley_1/frame_0001.png')
ret = im[100:100+256, 400:400+256, :]  # square 256x256 crop
h, w, c = ret.shape
x = ret
# BUG FIX: cv2.resize takes dsize as (width, height). The original passed
# (h, w); the crop is square so it happened to work — corrected to (w, h).
# Default interpolation (bilinear) is made explicit.
for i in range(5):
    x = cv2.resize(x, (w // 2 ** (i + 1), h // 2 ** (i + 1)),
                   interpolation=cv2.INTER_LINEAR)
# Upscale back step by step to the original resolution.
y = x
for i in range(5):
    y = cv2.resize(y, (w // 2 ** (4 - i), h // 2 ** (4 - i)),
                   interpolation=cv2.INTER_LINEAR)
cv2.imwrite('y1.png', y)


Out[4]:
True

In [ ]:
# Pixelation demo: shrink 32x with nearest-neighbor, then blow back up.
im = cv2.imread('/home/lwp/workspace/sintel2/albedo/alley_1/frame_0001.png')
ret = im[100:100+256, 400:400+256, :]  # square 256x256 crop
h, w, c = ret.shape
# BUG FIX: cv2.resize's 3rd positional parameter is `dst`, not
# `interpolation` — the flag was being passed into the wrong slot and
# nearest-neighbor was never applied. It must be given by keyword.
# Also, dsize is (width, height), not (height, width).
x = ret
x = cv2.resize(x, (w // 32, h // 32), interpolation=cv2.INTER_NEAREST)
y = cv2.resize(x, (w, h), interpolation=cv2.INTER_NEAREST)
cv2.imwrite('y.png', y)

In [12]:
def calOutputChannel(input_channel, blocks, bn_size=4, growth_rate=32, transition_scale=2):
    """Trace DenseNet-style channel growth across a sequence of dense blocks.

    For each block of ``n`` layers the channel count grows by
    ``n * growth_rate`` and is then divided (integer division) by
    ``transition_scale``. A running tally ``sum_`` accumulates intermediate
    sizes and is printed at the end; each post-transition channel count is
    printed as the loop runs.

    Note: ``bn_size`` is accepted for signature compatibility but unused.

    Returns the channel count after the final transition.
    """
    output_channel = input_channel
    sum_ = 0
    print(blocks)
    for num_layers in blocks:
        output_channel = output_channel + num_layers * growth_rate
        # fold in the pre-transition width plus the per-layer growth
        sum_ = sum_ + num_layers * output_channel + num_layers * growth_rate
        output_channel = output_channel // transition_scale
        sum_ = sum_ + output_channel
        print(output_channel)
    print('sum = ', sum_)
    return output_channel

# Exercise the channel calculator on two small configurations
# (expected per-block printouts: 81, 120 for the first; 115 for the second).
calOutputChannel(3, (5, 5), bn_size=4, growth_rate=32, transition_scale=2)
print()
calOutputChannel(6, [7], bn_size=4, growth_rate=32, transition_scale=2)


(5, 5)
81
120

[7]
115
Out[12]:
115

In [ ]:
from tensorboardX import SummaryWriter

In [ ]:


In [ ]:
# Create a TensorBoard run whose log-dir name is suffixed with "hahaha",
# then log one text entry under tag 'aa'.
# NOTE(review): the writer is never closed — consider writer.close().
writer = SummaryWriter(comment='hahaha')
writer.add_text('aa','bb')

In [ ]:
import torch
import torch.nn as nn
from torch.autograd import Variable

# x = Variable(torch.ones(2, 2), requires_grad=True)
# print(x)

class A(nn.Module):
    """Minimal module wrapping a single element-wise sigmoid layer."""

    def __init__(self):
        super(A, self).__init__()
        self.layerA = nn.Sigmoid()

    def forward(self, x):
        # Element-wise sigmoid; input shape is preserved.
        out = self.layerA(x)
        return out
class B(nn.Module):
    """Second sigmoid wrapper, structurally identical to ``A`` — used to
    demonstrate chaining modules placed on different devices."""

    def __init__(self):
        super(B, self).__init__()
        self.layerB = nn.Sigmoid()

    def forward(self, x):
        # Element-wise sigmoid; input shape is preserved.
        out = self.layerB(x)
        return out
# Multi-GPU demo: run module A on CUDA device 2, ship the activation to
# device 3, run module B there, then walk the autograd graph backwards.
# NOTE(review): requires a host exposing CUDA devices 2 and 3.
a = A().cuda(2)
b = B().cuda(3)
# .cuda(2) returns a copy on device 2 (the copy is a non-leaf node).
x = Variable(torch.ones(2, 2), requires_grad=True).cuda(2)
print(x)
y1 = a(x)
print(y1)
y2 = y1.cuda(3)  # cross-device transfer of the activation
print(y2)
z = b(y2)
print(z)
# Inspect the backward graph node by node (exact node names depend on the
# installed torch version — TODO confirm on the target environment).
print(z.grad_fn)
print(z.grad_fn.next_functions[0][0])
print(z.grad_fn.next_functions[0][0].next_functions[0][0])

In [ ]:
# Create leaf tensors. (Variable is deprecated since PyTorch 0.4 — plain
# tensors with requires_grad=True are the modern, equivalent form.)
x = torch.tensor([1.0], requires_grad=True)
w = torch.tensor([2.0], requires_grad=True)
b = torch.tensor([3.0], requires_grad=True)

# Build a computational graph: y = w * x + b = 2 * 1 + 3.
y = w * x + b

# Backpropagate dy/d{x, w, b}.
y.backward()

# Print out the gradients.
print(x.grad)    # dy/dx = w = 2
print(w.grad)    # dy/dw = x = 1
print(b.grad)    # dy/db = 1

In [ ]:
class Foo(object):
    """Demonstrates a nested class: each ``Foo`` owns a ``Bar`` instance."""

    class Bar(object):
        """Empty placeholder nested class."""

    def __init__(self):
        # Resolve Bar through the instance so a subclass may override it.
        self.bar = self.Bar()

# Instantiating Foo also constructs the nested Bar instance as foo.bar.
foo = Foo()

In [ ]:
def func():
    """Return a fixed pair: the int 2 and a fresh empty list."""
    result = (2, [])
    return result
# Demonstrate tuple unpacking of a multi-value return.
a,b = func()
print(a,b)

In [ ]:
import torch
import torch.nn as nn
from torch.autograd import Variable  # kept for parity; no longer needed below
# Variable is deprecated since PyTorch 0.4 — a plain tensor works directly.
x = torch.tensor([[2.0, 3.0, 4.0], [3.0, 4.0, 5.0]])
h, w = x.size()  # unpack (rows, cols)
print(h, w)      # 2 3

In [ ]:
from tensorboardX import SummaryWriter
from myargs import Args

# Training-schedule configuration.
# NOTE(review): Args comes from the project-local `myargs` module and its
# attributes appear to be free-form; these assignments define the config.
ss = 6       # base stage step (epochs)
s0 = ss*2    # first threshold: 12 epochs
args = Args()
args.display_curindex = 0
args.base_lr = 0.05
args.display_interval = 20
args.momentum = 0.9
args.epoches = 120
# Per-stage epoch thresholds; only the last stage is delayed (to s0).
args.training_thresholds = [0,0,0,0,0,s0]
# Merge thresholds per stage; -1 presumably disables that stage — TODO confirm.
args.training_merge_thresholds = [s0+ss*9,s0+ss*6, s0+ss*3, s0, -1, s0+ss*12]
args.power = 0.5  # exponent of the polynomial LR decay

writer = SummaryWriter(comment='-{}'.format('test lr'))
optimizer=None
# Module-level current learning rate, mutated by adjust_learning_rate below.
lr = args.base_lr
def adjust_learning_rate(optimizer, epoch, beg, end, reset_lr=None):
    """Polynomial decay of the global learning rate over [beg, end].

    Sets the module-level ``lr`` to
    ``base_lr * ((end - epoch) / (end - beg)) ** power``,
    clamps it below at 1e-8, and logs it to TensorBoard at step ``epoch``.

    NOTE(review): the previous docstring ("decayed by 10 every 30 epochs")
    described a step schedule and did not match this code. ``optimizer``
    and ``reset_lr`` are accepted but unused here — presumably the
    optimizer's param-group lr is updated elsewhere; confirm with callers.
    Also note: raises ZeroDivisionError if ``end == beg``.
    """
    global lr
    print('adjust', epoch, beg, end, lr)
    lr = args.base_lr * (float(end-epoch)/(end-beg)) ** (args.power)
    if lr < 1.0e-8: lr = 1.0e-8
        
        
  
    writer.add_scalar('lr', lr, global_step=epoch)

In [12]:
32 * 5  # scratch arithmetic check (expected Out: 160)


Out[12]:
160