In [1]:
import torch
from torch.autograd import Variable
from torch.autograd import Function
import numpy as np

In [ ]:
# PIPELINE
# Feed in a tensor of size batch_size x max_set_size x embd_dim.
# This could also be a packed sequence that carries the length of each set.
# 0) Assume the DataSet has already created the (padded) sequences
# 1) Collapse to 2-D to feed through the kernel embedding
# 2) Re-assemble the kernel matrix L with a for-loop, eigendecompose, and sample from the DPP
# 3) The result should be batch_size x alpha_iter x embd_dim (the summed selection per batch element and iteration)
# 4) Collapse to 2-D and feed through the prediction network
# 5) This gives something of size (batch_size * alpha_iter) x target_dim; expand the target to match
# 6) Backpropagate the loss
mask = data.abs().sum(2).sign().squeeze()   # 1 on real elements, 0 on padding
lengths = mask.sum(1)                       # true set size per batch element
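
A minimal sketch of steps 0) and 1), using the same masking idiom as the cells below; the sizes are illustrative assumptions, and it relies on torch 0.x semantics where reductions keep the reduced dimension:

In [ ]:
import torch
from torch.autograd import Variable

batch_size, max_set_size, embd_dim = 3, 4, 2
data = Variable(torch.zeros(batch_size, max_set_size, embd_dim))
data.data[0, :2] = torch.randn(2, embd_dim)   # set 0 has 2 real elements
data.data[1, :4] = torch.randn(4, embd_dim)   # set 1 has 4
data.data[2, :1] = torch.randn(1, embd_dim)   # set 2 has 1

mask = data.abs().sum(2).sign()               # batch_size x max_set_size x 1
lengths = mask.sum(1).squeeze()               # true set size per batch element
flat = data.masked_select(mask.expand_as(data).byte()).view(-1, embd_dim)
print(lengths.data)                           # 2, 4, 1
print(flat.size())                            # 7 x embd_dim  (2 + 4 + 1 rows)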

In [88]:
samples


Out[88]:
[Variable containing:
  0  1  1  0
  0  1  1  1
  1  0  0  0
  1  1  1  1
  1  0  1  0
 [torch.FloatTensor of size 5x4], Variable containing:
  0  1  1
  0  1  0
  1  0  1
  1  1  1
  1  1  0
 [torch.FloatTensor of size 5x3], Variable containing:
  0  0  1  1  1  1
  1  1  0  1  0  1
  1  0  1  0  1  0
  0  1  0  1  0  1
  0  1  1  0  0  0
 [torch.FloatTensor of size 5x6], Variable containing:
  1  0  1  1
  0  1  1  0
  1  0  1  1
  0  1  1  1
  1  0  1  1
 [torch.FloatTensor of size 5x4], Variable containing:
  0  1  1  1  0
  1  0  0  0  1
  1  0  0  1  0
  0  0  0  1  1
  0  0  1  0  0
 [torch.FloatTensor of size 5x5]]

In [69]:
[torch.zeros(alpha_iter,i) for i in (max_set_size - length.data)]


Out[69]:
[
  0  0
  0  0
  0  0
  0  0
  0  0
 [torch.FloatTensor of size 5x2], 
  0  0  0
  0  0  0
  0  0  0
  0  0  0
  0  0  0
 [torch.FloatTensor of size 5x3], 
  0
  0
  0
  0
  0
 [torch.FloatTensor of size 5], 
  0  0
  0  0
  0  0
  0  0
  0  0
 [torch.FloatTensor of size 5x2], 
  0
  0
  0
  0
  0
 [torch.FloatTensor of size 5x1]]

In [66]:
torch.cat([torch.zeros(0),torch.zeros(0)])


Out[66]:
[torch.FloatTensor with no dimension]

In [269]:
def my_hook(i, j):
    # closure capturing (i, j) so the hook can report which sample's gradient is flowing
    def my_print(module, grad_in, grad_out):
        print(i, j, loss_list[i][j])
    return my_print
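
For reference, a hook built this way has to be registered on a module instance; registering on the class is what trips the TypeError further down. A small hypothetical usage, with a dummy loss_list so the closure has something to print:

In [ ]:
import torch
import torch.nn as nn
from torch.autograd import Variable

loss_list = [[0.0]]                       # dummy value for the hook to report
layer = nn.Linear(3, 2)
handle = layer.register_backward_hook(my_hook(0, 0))
out = layer(Variable(torch.randn(1, 3)))
out.sum().backward()                      # triggers my_print(0, 0, loss_list[0][0])
handle.remove()                           # detach the hook when done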

In [270]:
import torch
import torch.nn as nn
from torch.autograd import Variable

# Set up data
batch_size = 5
max_set_size = 6
feat_dim = 7
target_dim = 3
hidden_dim = 10
alpha_iter = 2
kernel = nn.Linear(feat_dim, hidden_dim)
predictor = nn.Linear(feat_dim, target_dim)

data = torch.zeros(batch_size, max_set_size, feat_dim)
data[0,:4] = torch.randn(4,feat_dim)
data[1,:3] = torch.randn(3,feat_dim)
data[2,:6] = torch.randn(6,feat_dim)
data[3,:4] = torch.randn(4,feat_dim)
data[4,:5] = torch.randn(5,feat_dim)
data = Variable(data)
target = Variable(torch.randn(batch_size, target_dim))
criterion = nn.MSELoss()

# Forward pass
mask = data.abs().sum(2).sign()                 # B x S x 1: 1 on real elements, 0 on padding
length = mask.sum(1).squeeze()                  # true set size per batch element
batch_kernel = kernel(data.masked_select(mask.expand_as(data).byte()).view(-1, feat_dim))
s = 0
samples = [[] for i in range(batch_size)]

# custom_eig, DPPLayer and pad_with_zeros are defined elsewhere (cf. the dpp_nets imports)
for i, end in enumerate(length.data.cumsum(0).long()):
    A = batch_kernel[s:end]                     # rows belonging to set i
    L = A.mm(A.t())                             # DPP kernel for set i
    e, v = custom_eig()(L)
    for j in range(alpha_iter):
        dpp = DPPLayer()
        dpp.register_backward_hook(my_hook(i, j))   # register on the instance, before the call
        subset = dpp(e, v)
        sample = pad_with_zeros(subset, max_set_size)
        samples[i].append(sample)
    s = end

samples = [torch.stack(i) for i in samples]                 # alpha_iter x max_set_size each
reps = [samples[i].mm(data[i]) for i in range(batch_size)]  # summed selection per sample
big = torch.cat(reps)
predictions = predictor(big).view(batch_size, alpha_iter, target_dim)
target = target.unsqueeze(1).expand(batch_size, alpha_iter, target_dim)
loss = criterion(predictions, target)
loss_list = list(((predictions - target)**2).mean(2).data)  # per-(batch, sample) losses for the hooks
loss_list = [list(i.view(-1)) for i in loss_list]
loss.backward()


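pad_with_zeros is used above but never defined in the notebook; a hypothetical implementation, consistent with the zero-padding experiment in In [69]:

In [ ]:
def pad_with_zeros(subset, max_set_size):
    # subset: 1-D 0/1 indicator Variable over the real elements of one set
    n_pad = max_set_size - subset.size(0)
    if n_pad == 0:
        return subset
    return torch.cat([subset, Variable(torch.zeros(n_pad))])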


In [248]:
samples


Out[248]:
[Variable containing:
  1  1  1  1  0  0
  0  1  1  1  0  0
 [torch.FloatTensor of size 2x6], Variable containing:
  0  0  0  0  0  0
  1  0  0  0  0  0
 [torch.FloatTensor of size 2x6], Variable containing:
  1  0  1  1  1  0
  1  1  0  1  1  1
 [torch.FloatTensor of size 2x6], Variable containing:
  1  1  1  0  0  0
  1  1  0  1  0  0
 [torch.FloatTensor of size 2x6], Variable containing:
  0  1  0  1  0  0
  0  1  1  0  0  0
 [torch.FloatTensor of size 2x6]]


In [30]:



eigenvalues and eigenvectors
CVT error:  1.85858762189e-15
CVT error:  1.1633811087e-14
adj error:  8.881784197e-16

In [1]:
# Set-up: adjoint check for the eigendecomposition backward pass
import torch

N = 3
A = torch.randn(N, N).double()
A = A.mm(A.t())                              # symmetric, so real eigenvalues
e, v = torch.eig(A, eigenvectors=True)
e = e[:, 0]                                  # keep the real parts

# Random perturbation for forward
dA = torch.randn(N, N).double()
E = e.expand(N, N) - e.expand(N, N).t()
F = 1 / (E + torch.eye(N).double()) - torch.eye(N).double()   # zero diagonal, 1/(e_j - e_i) off it
P = v.inverse().mm(dA).mm(v)
de = torch.eye(N).double() * P               # eigenvalue perturbations
dv = v.mm(F * P)                             # eigenvector perturbations

# Random perturbation for backward
be = torch.randn(N).diag().double()
bv = torch.randn(N, N).double()
med = be + F * (v.t().mm(bv))
bA = v.t().inverse().mm(med).mm(v.t())

# Adjoint identity: <dA, bA> should equal <de, be> + <dv, bv>
print('adj error: ', torch.sum(dA*bA) - torch.sum(de*be) - torch.sum(dv*bv))
bA

# Check forward pass using analytic function and complex matrices
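
custom_eig is called throughout but never shown. A minimal sketch of how the adjoint algebra above could be wrapped as an old-style autograd Function (hypothetical; the real custom_eig presumably lives alongside custom_svd in dpp_nets.my_torch.linalg and may differ):

In [ ]:
class CustomEig(Function):
    def forward(self, A):
        e, v = torch.eig(A, eigenvectors=True)
        e = e[:, 0].contiguous()          # real parts; assumes A is symmetric
        self.save_for_backward(e, v)
        return e, v

    def backward(self, be, bv):
        e, v = self.saved_tensors
        N = e.size(0)
        I = torch.eye(N).type_as(v)
        E = e.expand(N, N) - e.expand(N, N).t()
        F = 1 / (E + I) - I               # same F as in the check above
        med = be.diag() + F * (v.t().mm(bv))
        return v.t().inverse().mm(med).mm(v.t())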



In [80]:
# Checking SVD IN NUMPY!!

import numpy as np

# General Set-up
N = 4
A = 0.1 * np.random.randn(N, N) + np.diag(np.arange(1, N+1))
B = np.random.randn(N, N)
I = np.eye(N)
dA = np.random.randn(N, N)
dB = np.random.randn(N, N)
bC = np.random.randn(N, N)
eps = 1e-20
epsi = 1 / eps
Ae = A + 1j*eps*dA
Be = B + 1j*eps*dB

# SVD
u, s, vT = np.linalg.svd(A)
s = np.diag(s)

De, Ue = np.linalg.eig(Ae.dot(Ae.T))
D = np.real(De)
U = np.real(Ue)

# normalize the complex eigenvectors so that dU has zero diagonal in U's basis
Ue = Ue.dot(np.diag(1 / np.diag(np.linalg.inv(U).dot(Ue))))
E = np.outer(np.ones(N), D) - np.outer(D, np.ones(N))
F = 1 / (E + np.eye(N)) - np.eye(N)
P = np.linalg.inv(U).dot(dA.dot(U))
dD = np.eye(N) * P
dU = U.dot(F * P)

bD = np.diag(np.random.randn(N))
bU = np.random.randn(N, N)
med = bD + F * (U.T.dot(bU))
bA = np.linalg.inv(U.T).dot(med.dot(U.T))
print('eigenvalues and eigenvectors')
print('CVT error: ', np.linalg.norm(np.diag(dD)-epsi*np.imag(De)))
print('CVT error: ', np.linalg.norm(dU-epsi*np.imag(Ue)))
print('adj error: ',np.sum(dA*bA)-np.sum(dD*bD)-np.sum(dU*bU))


eigenvalues and eigenvectors
CVT error:  10.7986414951
CVT error:  3.06826003225
adj error:  4.4408920985e-16
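
The "CVT error" lines compare the analytic perturbations against the complex-variable trick, df = Im f(x + i*eps*dx) / eps. The idea in one scalar line, as a sanity check (values are illustrative):

In [ ]:
# f(x) = x**2, so df = 2*x*dx; the complex step recovers it to machine precision
x, dx, eps = 1.7, 0.3, 1e-20
f = (x + 1j * eps * dx) ** 2
print(np.imag(f) / eps)   # 1.02
print(2 * x * dx)         # 1.02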

In [77]:
De, Ue = np.linalg.eig(Ae.dot(Ae.T))

In [78]:
De


Out[78]:
array([  0.75808267 -5.96442145e-20j,  16.56881395 +5.61666792e-20j,
         3.77822669 +1.92363529e-20j,   8.72362952 +4.89203296e-20j])

In [40]:
# Now the same thing for SVD
# First do it by hand, then try it with the autograd Function
from dpp_nets.my_torch.linalg import custom_svd

M = 4
N = 5
A = torch.randn(M, N).double()
dA = torch.randn(M, N).double()
vecs, vals, v = torch.svd(A, some=False) # M x M, M, N x N

# Random perturbation for forward pass
utdAv = vecs.t().mm(dA).mm(v) #M x N
dP1 = utdAv[:,:M] # M x M
dP2 = utdAv[:,M:] # M x (N - M)
dS = utdAv.diag() # M
E = vals.expand(M,M) - vals.expand(M,M).t() # mask
F = 1 / (E + torch.eye(M).double()) - torch.eye(M).double()
dC = F * (dP1.mm(vals.diag()) + vals.diag().mm(dP1.t()))
dU = vecs.mm(dC)
dvals = dS
dvecs = dU

# Backward pass
bvals = torch.randn(M).diag().double()
bvecs = torch.randn(M, M).double()
bP1 = (vecs.t() * F).mm(bvecs).mm(vals.diag()) + bvecs.t().mm(vecs * F.t()).mm(vals.diag())
med = bvals + bP1
bA = vecs.mm(med).mm(v[:,:M].t())

# Now check it
print('adj error: ',torch.sum(dA*bA)-torch.sum(dvals*bvals.diag())-torch.sum(dvecs*bvecs))



In [39]:
dS


Out[39]:
-1.0997
 0.3033
-1.5958
 0.7418
[torch.DoubleTensor of size 4]

In [20]:
vecs.mm(bvals).


Out[20]:
-0.2665  0.1132 -1.0704 -0.6101 -1.0782
 0.3332 -0.1740  1.1825  0.1516  0.1103
 0.1841  0.0268 -0.9359  0.1113 -0.7498
 0.2306  0.4855 -0.0951 -0.1724  0.2393
[torch.DoubleTensor of size 4x5]

In [14]:
bvecs


Out[14]:
 0.0642  0.5162  1.8358  1.8688
 1.5988  1.5422 -1.0641 -0.7090
 0.3098 -0.4780 -0.0715  0.2414
 1.1151 -0.9208 -0.7757 -0.1190
[torch.FloatTensor of size 4x4]

In [ ]:
A_var = Variable(A, requires_grad=True)
e_var, v_var = custom_eig()(A_var)
be_var = be.diag()                 # vector of eigenvalue gradients, matching e_var
bv_var = bv
e_var.backward(be_var, retain_variables=True)
v_var.backward(bv_var)
bA = A_var.grad.data

# Artificial forward pass: simply re-use the perturbation tensors from the eig cell above.
# By showing that the backward gradients agree, we establish the proof of concept.
print('adj error: ', torch.sum(dA*bA) - torch.sum(de*be) - torch.sum(dv*bv))
bA

In [ ]:
# Scalability - Flexible batch_size
import torch
import torch.nn as nn

torch.manual_seed(10)
batch_size = 5
max_set_size = 6
feat_dim = 4
hidden_dim = 300
data = torch.randn(batch_size, max_set_size, feat_dim)
model = nn.Linear(feat_dim, hidden_dim)

In [ ]:
# now make it tensor-ready
mask, _ = data.abs().max(dim=2)
length = mask.sign().sum(dim=1).squeeze()
mask = mask.sign().expand_as(data).byte()

my_input = Variable(data, requires_grad=True)
compressed = my_input.masked_select(Variable(mask)).view(-1,feat_dim)
med = model(compressed)

# Now the eigendecomposition: to slice out each set's kernel we have to
# re-assemble the batch, which needs a for-loop over the batch elements.
start = 0
my_list = [[] for i in range(batch_size)]
for i, end in enumerate(length.cumsum(0).long()):
    kernel = med[start:end]
    L = kernel.mm(kernel.t())
    e, v = custom_eig()(L)
    for j in range(3):
        subset = DPP()(e, v)
        my_list[i].append(subset)
    start = end
new_list = [torch.stack(l) for l in my_list]

#loss = torch.stack(my_list)
#final = loss.sum()
#final.backward()
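
A hypothetical way to finish the commented-out lines: the stacks in new_list have different widths (one per set size), so flatten them before summing into a single scalar and backpropagating (this assumes the DPP Function defines a backward):

In [ ]:
final = torch.cat([t.view(-1) for t in new_list]).sum()
final.backward()
print(my_input.grad.size())   # gradients arrive back at the padded input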

In [ ]: