In [ ]:
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier
from dpp_nets.helper.plotting import plot_floats, plot_dict

In [ ]:
# Learning a kDPP

# TODO(review): SimKDPP is not imported by the notebook's import cell, so this
# raises NameError on a fresh kernel.  Assuming it lives in
# dpp_nets.my_torch.simulator alongside SimKDPPDeepSet — confirm the path.
from dpp_nets.my_torch.simulator import SimKDPP

# Initialization / hyper-parameters
network_params = {'set_size': 40, 'n_clusters': 10}
dtype = torch.DoubleTensor
train_it = 1000
batch_size = 10
lr = 1e-3
alpha_iter = 4

torch.manual_seed(10)
no_baseline = SimKDPP(network_params, dtype)

# Pre-Training Performance (seed fixed so evaluation data matches later runs)
print('pre-training performance (no_baseline)')
print(50 * ' ')
torch.manual_seed(99)
no_baseline.evaluate(1000)

# Training (last positional arg False — presumably a "use baseline" flag,
# judging from the paired baseline/no_baseline cells; confirm in train())
print(50 * '-')
print('training (no_baseline)')
print(50 * ' ')
torch.manual_seed(0)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)

# Evaluation — same seed 99 as the pre-training evaluation for comparability
print(50 * '-')
print('trained network (no_baseline):')
torch.manual_seed(99)
no_baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
no_baseline.random_benchmark(1000)

In [ ]:
# TODO(review): SimKDPP is not imported by the notebook's import cell —
# assuming it lives in dpp_nets.my_torch.simulator; confirm the path.
from dpp_nets.my_torch.simulator import SimKDPP

# Same experiment as the previous cell, identical seeds; only the final
# positional argument to train() differs (True — presumably the baseline flag).
torch.manual_seed(10)
baseline = SimKDPP(network_params, dtype)

# Pre-Training Performance
print('pre-training performance (baseline)')
print(50 * ' ')
torch.manual_seed(99)
baseline.evaluate(1000)

# Training
print(50 * '-')
print('training (baseline)')
print(50 * ' ')
torch.manual_seed(0)
baseline.train(train_it, batch_size, lr, alpha_iter, True)

# Evaluation — same seed as the pre-training evaluation for a fair comparison
print(50 * '-')
print('trained network (baseline):')
torch.manual_seed(99)
baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
baseline.random_benchmark(1000)

In [ ]:
plot_floats(baseline.rec_dict,500, 'rec_kDPP', 'Recall (over batch and sampled subsets)', 'Training Iteration', 'Mean Recall')

In [ ]:
# Learning a kDPP

# Loss curves for both runs, written out as PDFs (second argument presumably a
# smoothing/sampling window — confirm against plot_floats).
plot_floats(no_baseline.loss_dict,50,"no_baseline_loss_1e5_kDPP.pdf","Loss","Training Iteration", "Custom_Loss")
plot_floats(baseline.loss_dict,50,"baseline_loss_1e5_kDPP.pdf","Loss","Training Iteration", "Custom_Loss")

In [ ]:
from dpp_nets.helper.plotting import plot_embd, gen_matrix_from_cluster_ix, plot_matrix
from torch.autograd import Variable
# NOTE(review): `loaded` is only defined a few cells BELOW this one — this cell
# depends on out-of-order execution and fails under Restart & Run All.
words, context, target = loaded.generate(1)
# Concatenate word and context features along dim 2 (presumably the feature
# dim — confirm against generate()); squeeze drops the batch dim of size 1.
x = Variable(torch.cat([words, context],dim=2)).squeeze(0)
embd = baseline.kernel_net(x).data  # embeddings from the trained kernel net
plot_embd(x.data)
plot_embd(embd)
plot_matrix(gen_matrix_from_cluster_ix(target))  # ground-truth cluster matrix

In [ ]:
state_dict = torch.load('VIMCO_kDPP_10k_10k_10k.pt')

In [ ]:
loaded = SimKDPP(network_params, dtype)

In [ ]:
loaded.kernel_net.load_state_dict(state_dict)

In [ ]:
import numpy as np
# Convert the generated sample to numpy (`words`/`target` come from the
# `loaded.generate(1)` cell above).
embd = words.numpy()
embd_copy = embd.copy()
index = target.numpy()
arg = np.argsort(index)  # permutation that sorts items by cluster index
arg

In [ ]:
# Display the raw cluster indices alongside the permutation that sorts them.
for arr in (index, arg):
    print(arr)

In [ ]:
# Drop the batch dimension, then reorder rows so items from the same cluster
# become adjacent.  NOTE(review): `embd` still carries its leading batch dim
# here, so the assignment relies on numpy broadcasting — confirm shapes.
embd_copy = embd_copy.squeeze(0)
embd_copy[arg] =  embd

In [ ]:
# Build the DPP marginal kernel K from the L-ensemble kernel L = X Xᵀ via
# K = L (L + I)⁻¹.  The identity size is derived from the data instead of the
# original hard-coded 40, and an explicit matrix inverse is replaced by a
# linear solve for numerical stability:
#   K = L (L + I)⁻¹  ⇔  Kᵀ = (L + I)⁻ᵀ Lᵀ  ⇔  solve((L + I)ᵀ, Lᵀ)ᵀ.
L = embd_copy.dot(embd_copy.T)
n = L.shape[0]
K = np.linalg.solve((L + np.eye(n)).T, L.T).T

In [ ]:
plot_matrix(K)

In [ ]:
# Learning a Filter - No Baseline

# TODO(review): SimFilter is not imported by the notebook's import cell —
# assuming it lives in dpp_nets.my_torch.simulator; confirm the path.
from dpp_nets.my_torch.simulator import SimFilter

# Initialization — note the extra 'max_sig' key specific to the filter task.
network_params = {'set_size': 40, 'n_clusters': 10, 'max_sig': 10}
dtype = torch.DoubleTensor
train_it = 5000
batch_size = 10
lr = 1e-5
alpha_iter = 4

torch.manual_seed(10)
no_baseline = SimFilter(network_params, dtype)

# Pre-Training Performance (fixed seed so evaluation data matches later runs)
print('pre-training performance (no_baseline)')
print(50 * ' ')
torch.manual_seed(99)
no_baseline.evaluate(1000)

# Training (last positional arg False — presumably the "use baseline" flag)
print(50 * '-')
print('training (no_baseline)')
print(50 * ' ')
torch.manual_seed(0)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)

# Evaluation — same seed 99 as the pre-training evaluation for comparability
print(50 * '-')
print('trained network (no_baseline):')
torch.manual_seed(99)
no_baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
no_baseline.random_benchmark(1000)

In [ ]:
# Learning a Filter - Baseline

# TODO(review): SimFilter is not imported by the notebook's import cell —
# assuming it lives in dpp_nets.my_torch.simulator; confirm the path.
from dpp_nets.my_torch.simulator import SimFilter

# Initialization — reuses network_params and hyper-parameters defined in the
# SimFilter no_baseline cell above.

torch.manual_seed(10)
baseline = SimFilter(network_params, dtype)

# Pre-Training Performance
print('pre-training performance (baseline)')
print(50 * ' ')
torch.manual_seed(99)
baseline.evaluate(1000)

# Training (True — presumably enables the baseline; only diff vs. cell above)
print(50 * '-')
print('training (baseline)')
print(50 * ' ')
torch.manual_seed(0)
baseline.train(train_it, batch_size, lr, alpha_iter, True)

# Evaluation — same seed as the pre-training evaluation for a fair comparison
print(50 * '-')
print('trained network (baseline):')
torch.manual_seed(99)
baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
baseline.random_benchmark(1000)

In [ ]:
plot_floats(baseline.loss_dict,20)

In [ ]:
# Continue training both models with a shared seed so the two runs see
# identical data streams; only the final (baseline) argument differs.
torch.manual_seed(22)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)
torch.manual_seed(22)
baseline.train(train_it, batch_size, lr, alpha_iter, True)

In [ ]:
# Evaluate both models on the same seed-99 evaluation stream.
torch.manual_seed(99)
no_baseline.evaluate(1000)
torch.manual_seed(99)
baseline.evaluate(1000)

In [ ]:


In [ ]:
from dpp_nets.my_torch.simulator import SimKDPPDeepSet
import torch
# Deep-set variant of the kDPP simulator on the same 40-item / 10-cluster task.
network_params = {'set_size': 40, 'n_clusters': 10}
dtype = torch.DoubleTensor
sim = SimKDPPDeepSet(network_params, dtype)

# Hyper-parameters for this run (passed positionally to train below).
train_iter = 1000
batch_size = 10
lr = 1e-3
alpha_iter=5
baseline=True
reg=0
reg_mean=0

sim.train(train_iter, batch_size, lr, alpha_iter, baseline, reg, reg_mean)

In [ ]:
import torch
# Scratch: sine of 0..9.  NOTE(review): on modern torch, arange(0, 10) yields
# an int64 tensor which torch.sin rejects; under the old torch used here,
# arange returned a FloatTensor, so this ran fine.
torch.sin(torch.arange(0,10))

In [ ]:
input_set_size = 40   # number of items (rows) per input set
pred_in = 20          # feature dimension per item
aspects_n = 3         # number of signal rows to plant

words = torch.rand(input_set_size, pred_in)
# Binary target, one bit per aspect (random_(2) draws integers from {0, 1}).
target = torch.FloatTensor(aspects_n).random_(2)

# Compute signals: pick aspects_n distinct row indices (multinomial samples
# without replacement by default) and zero those rows out.
# NOTE(review): arange is used as the sampling WEIGHTS, so row 0 has weight 0
# and can never be selected, and later rows are proportionally more likely —
# confirm this is intended rather than uniform sampling (torch.ones weights).
# Changes vs. original: the hard-coded 3 is generalized to aspects_n (it must
# match the zeros() shape below), and .float() keeps the call working on
# modern torch where arange yields int64 (a no-op on old torch).
ixs = torch.multinomial(torch.arange(0, input_set_size).float(), aspects_n)
words[ixs] = torch.zeros(aspects_n, pred_in)

In [ ]:
torch.ones(aspects_n, pred_in).diag()

In [ ]:
# Fixed random binary "faces" filling the columns after the diagonal block.
signal_faces = torch.FloatTensor(aspects_n, pred_in - aspects_n).random_(2)

words = torch.rand(input_set_size, pred_in)
target = torch.FloatTensor(aspects_n).random_(2)

# Compute signals: each signal row encodes one aspect's target bit on the
# diagonal (first aspects_n columns), followed by its fixed binary face.
signals = torch.cat([target.diag(), signal_faces],dim=1)
# NOTE(review): arange weights mean row 0 is never chosen — confirm intended.
# Changes vs. original: hard-coded 3 generalized to aspects_n (must match the
# number of signal rows), and .float() added for modern-torch compatibility
# (no-op on the old torch where arange already returned a FloatTensor).
ixs = torch.multinomial(torch.arange(0, input_set_size).float(), aspects_n)
words[ixs] = signals

In [ ]:
torch.gather(target.long(),0)

In [ ]:
# Two cluster centers per aspect (one for target=0, one for target=1).
signal_clusters = torch.rand(2 * aspects_n, pred_in)
signal_cluster_var = 0.1  # presumably a noise scale — TODO confirm usage

In [ ]:
# Fresh random input set and binary target.
words = torch.rand(input_set_size, pred_in)
target = torch.FloatTensor(aspects_n).random_(2)

In [ ]:
# Pick one cluster center per aspect: offsets [0, 2, 4] select each aspect's
# pair of centers, and the target bit selects which of the two.
# NOTE(review): normal_(signal_cluster_var) OVERWRITES the gathered centers
# with draws from N(mean=0.1, std=1) — it does not add noise around them.
# Likely intended normal_(0, signal_cluster_var) added to the centers, or
# similar; confirm before relying on this cell.
signal = signal_clusters[target.long() + torch.LongTensor([0,2,4])].normal_(signal_cluster_var)
ixs = torch.multinomial(torch.arange(0, input_set_size), 3)

In [ ]:
words.sum(0).expand_as(words)

In [ ]:
signal_clusters

In [ ]:
words

In [ ]:
target

In [ ]:
signal_clusters

In [ ]:
import torch.nn as nn

In [ ]:
loss = nn.BCELoss()(Variable(torch.FloatTensor([0.4, 0.4, 0.4])),Variable(target))

In [ ]:
loss.data[0]

In [ ]:
# New Classification Simulation
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier
from dpp_nets.helper.plotting import plot_floats, plot_dict

# Problem size for the classification simulation.
input_set_size = 50
aspects_n = 4
dtype = torch.DoubleTensor

my_classifier = SimulClassifier(input_set_size, aspects_n, dtype)

In [ ]:
my_classifier.train(2000, 50, 3, baseline=True, lr=1e-4)

In [ ]:
# Fresh model trained with the deterministic-baseline variant for comparison.
my_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
my_classifier.train_deterministic_baseline(2000,50, lr=1e-3)

In [ ]:
my_classifier.signal_clusters[2] - my_classifier.signal_clusters[3]

In [1]:
# Small Classifier (Deterministic vs -.-)
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier

# Smaller problem instance for quicker experiments.
input_set_size = 20
aspects_n = 4
dtype = torch.DoubleTensor

small_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
small_classifier.train_deterministic(100,50, lr=1e-3)  # interrupted by hand in the saved run (see traceback below)
small_classifier.evaluate_deterministic(1000)


0.6955394071569249
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-1-7d7de3e654ca> in <module>()
      8 
      9 small_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
---> 10 small_classifier.train_deterministic(100,50, lr=1e-3)
     11 small_classifier.evaluate_deterministic(1000)

~/git/dpp_nets/dpp_nets/my_torch/simulator2.py in train_deterministic(self, train_steps, batch_size, lr)
    265             cum_loss += loss.data[0]
    266 
--> 267             loss.backward()
    268 
    269             if not ((t + 1) % batch_size):

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/autograd/variable.py in backward(self, gradient, retain_graph, create_graph, retain_variables)
    154                 Variable.
    155         """
--> 156         torch.autograd.backward(self, gradient, retain_graph, create_graph, retain_variables)
    157 
    158     def register_hook(self, hook):

~/Coding/anaconda2/envs/torch2/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(variables, grad_variables, retain_graph, create_graph, retain_variables)
     96 
     97     Variable._execution_engine.run_backward(
---> 98         variables, grad_variables, retain_graph)
     99 
    100 

KeyboardInterrupt: 

In [3]:
# Evaluate, fine-tune with baseline + regularization (reg_mean targets the
# number of aspects), then re-evaluate.
small_classifier.evaluate(1000)
small_classifier.train(500, 40, 5, baseline=True, lr=1e-3, reg=0.1, reg_mean=aspects_n)
small_classifier.evaluate(1000)


0.6957460366797257 0.49875 0.21678333333333363 0.1195 2.235
0.6826915068463202
0.6359640271290304
0.5649463325185554
0.4806092875264954
0.38255124526205714
0.3933576088423872
0.3003813065476759
0.2535500495353459
0.2025467626198719
0.24612959764892206
0.23401284167607148
0.17823382197116036
0.2111565061908231
0.15340093581929415
0.1718755433414967
0.15030250157857664
0.1800069691419908
0.1952436550620197
0.18773064253333405
0.15362130008033373
0.17022277615002201
0.16460439776623814
0.1769878780817935
0.13001421625546222
0.1630046628029125
0.1568533202868164
0.15184596695856353
0.172527074681803
0.16127061324824404
0.14916090087563685
0.19116077113629074
0.15286439115710412
0.17627055778658604
0.15005651319367364
0.16732544648266007
0.13373493191493438
0.14939090578408776
0.153744336697088
0.16460570752393078
0.17551594933477607
0.15333396174724334
0.17500369168351557
0.16349749118640045
0.15824119786724314
0.13395961301691994
0.17201826633620831
0.1459868834287504
0.15579733199363677
0.13891829390942662
0.16708392961591698
0.13958427897350978 0.9385 0.6603666666666681 0.77825 4.727

In [11]:
# Another round at a 10x higher learning rate (1e-2) — note the large loss
# spike at the start of the saved output below.
small_classifier.evaluate(1000)
small_classifier.train(1000, 50, 5, baseline=True, lr=1e-2, reg=0.1, reg_mean=aspects_n)
small_classifier.evaluate(1000)


0.09165022239349753 0.955 0.6895333333333336 0.78675 4.566
8.34765066972357
0.7293472822847679
0.7182852193532924
0.6604204187757324
0.5739920951795936
0.43094325415766827
0.32967180952613384
0.28299776869468524
0.28408176064707513
0.2717932788676054
0.27289985459512917
0.3262388077019293
0.28988938135196196
0.30916837645542655
0.3112341744059382
0.2641968235331603
0.2501057140746098
0.2808532830348861
0.250867058986055
0.2744109996264541
0.25649495644162346
0.2706375581346042
0.2647444465844877
0.2553977739791478
0.2654090770295347
0.30064246058701805
0.27105755524801145
0.2664770745281301
0.2811813677875658
0.276165922075917
0.28464242010467405
0.27654754128658166
0.2606517199869129
0.26241744224281144
0.23397606804828144
0.2673403871418787
0.2685798215415929
0.25220217777748877
0.2557229529552624
0.21699182408539278
0.19198756669800332
0.16712528288813472
0.17292212277588057
0.1515671358517551
0.1961011120493832
0.13959828768649446
0.13031164484152702
0.1760633178454498
0.1716843983676855
0.16058426865787476
0.1199133116904462
0.15709918013372906
0.1436666141252734
0.15056833569371803
0.12477977137131611
0.14191008100792613
0.15648469518837696
0.14026663315995444
0.1513634316220039
0.12943412678360486
0.1492875206239754
0.13596521197306408
0.1215156583967511
0.17237796942512065
0.13958648064374096
0.15828492570388003
0.12925088392952544
0.14416316198004553
0.13305377629208107
0.14669157674996625
0.1408409291550912
0.105567390060724
0.11378015029414412
0.13711099513135602
0.11320200940489698
0.14426509226539136
0.1261452637864579
0.12694567861454564
0.12460018128115156
0.1378034936055868
0.11943094097646324
0.16983129371412004
0.14715353581873955
0.13124501818494216
0.13753885460390097
0.130881407153718
0.13415384395339136
0.1489448780198181
0.1269260520719765
0.12257470182963001
0.11945884895112167
0.11967448949089626
0.12014569358195808
0.1200511636117144
0.09936496041930826
0.09576980862908104
0.10587661746994205
0.10114724117788013
0.110940792995274
0.13333678623222459
0.10354437664604291 0.944 0.6353333333333362 0.79575 5.012

In [6]:
small_classifier.evaluate(1000)


0.09382678532092885 0.951 0.6884714285714283 0.779 4.525

In [10]:
small_classifier.sample()


Target is:  
 1
 0
 1
 1
[torch.DoubleTensor of size 4]

Pred is:  
 0.9998
 0.0018
 0.9981
 0.9997
[torch.DoubleTensor of size 4]

Loss is: 0.00105701858179579
Acc is: 1.0
Subset is: 
 0
 0
 0
 0
 0
 1
 1
 0
 0
 1
 1
 0
 0
 0
 0
 0
 0
 0
 0
 1
[torch.DoubleTensor of size 20]

Ix is: 
 10
  9
  6
  5
[torch.LongTensor of size 4]

Subset statistics are: 0.8 1.0 5.0
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-10-d016aeb82399> in <module>()
----> 1 small_classifier.sample()

~/git/dpp_nets/dpp_nets/my_torch/simulator2.py in sample(self)
    244         print('Ix is:', ixs)
    245         print('Subset statistics are:', precision, recall, set_size)
--> 246 
    247         return words, context, ixs, target, self.pred, loss, subset
    248 

NameError: name 'pred' is not defined

In [ ]: