# In[ ]:
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier
from dpp_nets.helper.plotting import plot_floats, plot_dict
# In[ ]:
# Learning a kDPP: REINFORCE-style training of a kDPP sampler on the
# simulated clustering task, run WITHOUT a variance-reduction baseline.
# NOTE(review): SimKDPP is never imported in this file (only
# SimulClassifier is) — presumably it lives next to SimKDPPDeepSet in
# dpp_nets.my_torch.simulator; confirm before running top-to-bottom.
# Initialization
network_params = {'set_size': 40, 'n_clusters': 10}
dtype = torch.DoubleTensor
train_it = 1000
batch_size = 10
lr = 1e-3
alpha_iter = 4          # presumably MC samples per gradient step — confirm
torch.manual_seed(10)   # fixed init seed so both runs start identically
no_baseline = SimKDPP(network_params, dtype)
# Pre-Training Performance (seed 99 is re-set before every evaluation so
# all evaluate/benchmark calls see identical random data)
print('pre-training performance (no_baseline)')
print(50 * ' ')
torch.manual_seed(99)
no_baseline.evaluate(1000)
# Training (final positional arg False presumably disables the baseline —
# verify against SimKDPP.train's signature)
print(50 * '-')
print('training (no_baseline)')
print(50 * ' ')
torch.manual_seed(0)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)
# Evaluation
print(50 * '-')
print('trained network (no_baseline):')
torch.manual_seed(99)
no_baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
no_baseline.random_benchmark(1000)
# In[ ]:
# Same kDPP experiment as the cell above but trained WITH the baseline
# enabled (final train arg True); identical seeds make the runs comparable.
torch.manual_seed(10)
baseline = SimKDPP(network_params, dtype)
# Pre-Training Performance
print('pre-training performance (baseline)')
print(50 * ' ')
torch.manual_seed(99)
baseline.evaluate(1000)
# Training
print(50 * '-')
print('training (baseline)')
print(50 * ' ')
torch.manual_seed(0)
baseline.train(train_it, batch_size, lr, alpha_iter, True)
# Evaluation
print(50 * '-')
print('trained network (baseline):')
torch.manual_seed(99)
baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
baseline.random_benchmark(1000)
# In[ ]:
# Recall trace recorded during baseline training (smoothing window 500).
plot_floats(baseline.rec_dict,500, 'rec_kDPP', 'Recall (over batch and sampled subsets)', 'Training Iteration', 'Mean Recall')
In [ ]:
# Learning a kDPP: loss curves for both runs, smoothed over 50 iterations
# and written out as PDFs.
plot_floats(no_baseline.loss_dict,50,"no_baseline_loss_1e5_kDPP.pdf","Loss","Training Iteration", "Custom_Loss")
plot_floats(baseline.loss_dict,50,"baseline_loss_1e5_kDPP.pdf","Loss","Training Iteration", "Custom_Loss")
# In[ ]:
from dpp_nets.helper.plotting import plot_embd, gen_matrix_from_cluster_ix, plot_matrix
from torch.autograd import Variable
# NOTE(review): `loaded` is only created in a LATER cell (the checkpointed
# SimKDPP below) — this cell relies on out-of-order notebook execution and
# raises NameError if the file is run top-to-bottom.
words, context, target = loaded.generate(1)
# Concatenate word and context features, then drop the batch dim of 1.
x = Variable(torch.cat([words, context],dim=2)).squeeze(0)
embd = baseline.kernel_net(x).data
plot_embd(x.data)   # raw inputs
plot_embd(embd)     # learned kernel embeddings
# Matrix built from the target cluster indices — presumably cluster
# co-membership; confirm against gen_matrix_from_cluster_ix.
plot_matrix(gen_matrix_from_cluster_ix(target))
# In[ ]:
# Restore a checkpointed kernel network into a fresh SimKDPP instance.
state_dict = torch.load('VIMCO_kDPP_10k_10k_10k.pt')
In [ ]:
loaded = SimKDPP(network_params, dtype)
In [ ]:
loaded.kernel_net.load_state_dict(state_dict)
# In[ ]:
import numpy as np
# Reorder the word embeddings by their target cluster index so rows of the
# same cluster become adjacent (for the Gram-matrix plot below).
embd = words.numpy()     # assumes shape (1, set_size, dim) — TODO confirm
embd_copy = embd.copy()
index = target.numpy()
arg = np.argsort(index)
arg
In [ ]:
print(index)
print(arg)
In [ ]:
embd_copy = embd_copy.squeeze(0)
# NOTE(review): the RHS still carries the leading batch dim of 1, so this
# relies on broadcasting into the fancy-indexed (set, dim) slice. Also
# note it places row i at position arg[i] — the INVERSE of sorting
# (`embd[arg]` would sort) — confirm the intended direction.
embd_copy[arg] = embd
# In[ ]:
# Linear-kernel L-ensemble from the cluster-sorted embeddings, and its
# DPP marginal kernel K = L (L + I)^-1 (diagonal entries are per-item
# inclusion probabilities).
L = embd_copy.dot(embd_copy.T)
# Identity sized from L instead of the hard-coded set size 40, and
# np.linalg.solve instead of an explicit inverse for numerical stability:
# K (L + I) = L  =>  K = solve((L + I)^T, L^T)^T.
K = np.linalg.solve((L + np.eye(L.shape[0])).T, L.T).T
# In[ ]:
# Visualise the marginal kernel computed above.
plot_matrix(K)
# In[ ]:
# Learning a Filter - No Baseline
# NOTE(review): SimFilter is never imported in this file — presumably a
# sibling of SimKDPP in dpp_nets; confirm before running top-to-bottom.
# Initialization
network_params = {'set_size': 40, 'n_clusters': 10, 'max_sig': 10}
dtype = torch.DoubleTensor
train_it = 5000
batch_size = 10
lr = 1e-5
alpha_iter = 4
torch.manual_seed(10)   # fixed init seed, shared with the baseline run below
no_baseline = SimFilter(network_params, dtype)
# Pre-Training Performance (seed 99 fixed so every evaluation sees
# identical random data)
print('pre-training performance (no_baseline)')
print(50 * ' ')
torch.manual_seed(99)
no_baseline.evaluate(1000)
# Training (False presumably disables the variance-reduction baseline)
print(50 * '-')
print('training (no_baseline)')
print(50 * ' ')
torch.manual_seed(0)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)
# Evaluation
print(50 * '-')
print('trained network (no_baseline):')
torch.manual_seed(99)
no_baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
no_baseline.random_benchmark(1000)
# In[ ]:
# Learning a Filter - Baseline: same filter experiment but trained WITH
# the baseline enabled (final train arg True); identical seeds make the
# two runs directly comparable.
# Initialization
torch.manual_seed(10)
baseline = SimFilter(network_params, dtype)
# Pre-Training Performance
print('pre-training performance (baseline)')
print(50 * ' ')
torch.manual_seed(99)
baseline.evaluate(1000)
# Training
print(50 * '-')
print('training (baseline)')
print(50 * ' ')
torch.manual_seed(0)
baseline.train(train_it, batch_size, lr, alpha_iter, True)
# Evaluation
print(50 * '-')
print('trained network (baseline):')
torch.manual_seed(99)
baseline.evaluate(1000)
print(20 * '-')
print('random benchmark:')
torch.manual_seed(99)
baseline.random_benchmark(1000)
# In[ ]:
# Smoothed loss curve of the baseline filter run (window 20).
plot_floats(baseline.loss_dict,20)
# In[ ]:
# Continue training both filter models from their current state with a
# shared seed, then re-evaluate both on identical data (seed 99).
torch.manual_seed(22)
no_baseline.train(train_it, batch_size, lr, alpha_iter, False)
torch.manual_seed(22)
baseline.train(train_it, batch_size, lr, alpha_iter, True)
In [ ]:
torch.manual_seed(99)
no_baseline.evaluate(1000)
torch.manual_seed(99)
baseline.evaluate(1000)
# In[ ]:
# In[ ]:
# Deep-set variant: train SimKDPPDeepSet on the simulated clustering task.
from dpp_nets.my_torch.simulator import SimKDPPDeepSet
import torch

network_params = {'set_size': 40, 'n_clusters': 10}
dtype = torch.DoubleTensor
sim = SimKDPPDeepSet(network_params, dtype)

# Optimisation hyper-parameters.
train_iter, batch_size, lr = 1000, 10, 1e-3
alpha_iter, baseline = 5, True
reg = reg_mean = 0

sim.train(train_iter, batch_size, lr, alpha_iter, baseline, reg, reg_mean)
# In[ ]:
import torch

# Quick sanity check of torch.sin. torch.arange(0, 10) with int endpoints
# yields an integer tensor, which torch.sin rejects on modern PyTorch;
# float endpoints give the intended float tensor of sin(0..9).
sine_wave = torch.sin(torch.arange(0.0, 10.0))
sine_wave
# In[ ]:
# Toy setup: a random word set with aspects_n signal positions zeroed out.
input_set_size = 40
pred_in = 20
aspects_n = 3
words = torch.rand(input_set_size, pred_in)
target = torch.FloatTensor(aspects_n).random_(2)   # binary labels
# Compute signals: choose aspects_n distinct positions uniformly at random.
# The original torch.multinomial(torch.arange(...), 3) drew positions with
# probability proportional to their index (position 0 could never be
# chosen, and int tensors error on modern PyTorch); torch.randperm gives
# an unbiased without-replacement sample, and aspects_n replaces the
# hard-coded 3.
ixs = torch.randperm(input_set_size)[:aspects_n]
words[ixs] = torch.zeros(aspects_n, pred_in)
# In[ ]:
# NOTE(review): torch.ones(aspects_n, pred_in) is non-square (3x20), and
# .diag() on a 2-D tensor EXTRACTS the main diagonal (a length-3 vector
# of ones) rather than building a diagonal matrix — confirm intent.
torch.ones(aspects_n, pred_in).diag()
# In[ ]:
# Plant aspect signal rows into the random word set: each signal row is
# target's bit on the diagonal block, followed by a fixed random "face".
signal_faces = torch.FloatTensor(aspects_n, pred_in - aspects_n).random_(2)
words = torch.rand(input_set_size, pred_in)
target = torch.FloatTensor(aspects_n).random_(2)
# Compute signals
signals = torch.cat([target.diag(), signal_faces],dim=1)
# Uniform without-replacement positions: the original multinomial over
# arange weights was index-biased and excluded position 0; aspects_n
# replaces the hard-coded 3 so the count matches the signal rows.
ixs = torch.randperm(input_set_size)[:aspects_n]
words[ixs] = signals
# In[ ]:
# NOTE(review): torch.gather requires (input, dim, index); this call is
# missing the index tensor and raises a TypeError as written — the
# intended lookup cannot be reconstructed from this cell alone.
torch.gather(target.long(),0)
# In[ ]:
# Two prototype rows per aspect — presumably clusters 2a and 2a+1 encode
# target bit 0/1 for aspect a; confirm against the selection cell below.
signal_clusters = torch.rand(2 * aspects_n, pred_in)
signal_cluster_var = 0.1
In [ ]:
# Fresh random word set and binary targets for the cluster-signal variant.
words = torch.rand(input_set_size, pred_in)
target = torch.FloatTensor(aspects_n).random_(2)
# In[ ]:
# Noisy signal around the cluster prototypes selected by `target`:
# aspect a picks prototype 2a + target[a]. The original called
# .normal_(signal_cluster_var) on the indexed copy, which REPLACES the
# prototypes with N(signal_cluster_var, 1) samples; presumably the intent
# was zero-mean jitter with std signal_cluster_var around the prototypes.
centers = signal_clusters[target.long() + torch.LongTensor([0, 2, 4])]
signal = centers + signal_cluster_var * torch.randn(centers.size(0), centers.size(1))
# Uniform without-replacement positions (the original multinomial-over-
# arange draw was index-biased and excluded position 0).
ixs = torch.randperm(input_set_size)[:aspects_n]
# In[ ]:
# Sum over the set dimension, broadcast back to words' full shape
# (every row becomes the same per-feature sum vector).
words.sum(0).expand_as(words)
# In[ ]:
# Inspection cells: display intermediate tensors in the notebook.
signal_clusters
In [ ]:
words
In [ ]:
target
In [ ]:
signal_clusters
In [ ]:
import torch.nn as nn
In [ ]:
# BCE of a constant 0.4 prediction against the binary target vector
# (target is a length-3 FloatTensor of 0/1 values from the cell above).
loss = nn.BCELoss()(Variable(torch.FloatTensor([0.4, 0.4, 0.4])),Variable(target))
In [ ]:
# Legacy (pre-0.4 PyTorch) scalar access; on modern PyTorch this would be
# loss.item().
loss.data[0]
# In[ ]:
# New Classification Simulation: stochastic vs deterministic training of
# SimulClassifier on a 50-item set with 4 aspects.
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier
from dpp_nets.helper.plotting import plot_floats, plot_dict
input_set_size = 50
aspects_n = 4
dtype = torch.DoubleTensor
my_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
In [ ]:
# Presumably (iterations, batch_size, alpha_iter) — confirm against
# SimulClassifier.train's signature.
my_classifier.train(2000, 50, 3, baseline=True, lr=1e-4)
In [ ]:
# Fresh model trained with the deterministic baseline routine instead.
my_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
my_classifier.train_deterministic_baseline(2000,50, lr=1e-3)
In [ ]:
# Difference between two signal-cluster prototypes (how separable they are).
my_classifier.signal_clusters[2] - my_classifier.signal_clusters[3]
# In[1]:
# Small Classifier (Deterministic vs -.-): same experiment on a 20-item
# set — deterministic pre-training followed by stochastic fine-tuning.
import torch
from dpp_nets.my_torch.simulator2 import SimulClassifier
input_set_size = 20
aspects_n = 4
dtype = torch.DoubleTensor
small_classifier = SimulClassifier(input_set_size, aspects_n, dtype)
small_classifier.train_deterministic(100,50, lr=1e-3)
small_classifier.evaluate_deterministic(1000)
In [3]:
# Stochastic training with regularisation (reg/reg_mean presumably pull
# the sampled subset size towards aspects_n — confirm); evaluate before
# and after to see the change.
small_classifier.evaluate(1000)
small_classifier.train(500, 40, 5, baseline=True, lr=1e-3, reg=0.1, reg_mean=aspects_n)
small_classifier.evaluate(1000)
In [11]:
# Longer run with a 10x larger learning rate.
small_classifier.evaluate(1000)
small_classifier.train(1000, 50, 5, baseline=True, lr=1e-2, reg=0.1, reg_mean=aspects_n)
small_classifier.evaluate(1000)
In [6]:
small_classifier.evaluate(1000)
In [10]:
# Draw a sample from the trained model.
small_classifier.sample()
# In[ ]: