In [99]:
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
from matplotlib import rcParams
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
rcParams['figure.figsize'] = (12,6)
import numpy as np
import pandas as pd
import seaborn as sns
import os
import torch
from torch import nn, optim
from torchvision import models
from nupic.torch.modules import KWinners2d, Flatten
from torchsummary import summary
from utils import Dataset
In [144]:
# Two small integer vectors for outer-product experiments.
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])
# Outer product (3x3 matrix). torch.ger is deprecated; torch.outer is
# the modern equivalent with identical semantics.
c = torch.outer(a, b)
In [137]:
# Binary "activity" vectors — reuses the names a/b from the cell above.
a = torch.tensor([1,1,0])
b = torch.tensor([0,1,1])
In [138]:
a
Out[138]:
In [139]:
b
Out[139]:
In [140]:
a*b  # elementwise product
Out[140]:
In [141]:
a@b  # dot product (scalar)
Out[141]:
In [142]:
torch.ger(b,a)  # outer product of b and a — NOTE: torch.ger is deprecated in favor of torch.outer
Out[142]:
In [143]:
torch.sum(torch.ger(b,a), dim=1)  # row sums of the outer product
Out[143]:
In [267]:
a = {'a': 2}
In [268]:
def test(dic):
dic2 = dic.copy()
dic2['a'] = 3
test(a)
a
Out[268]:
In [245]:
c  # the 3x3 outer-product matrix from In [244]
Out[245]:
In [251]:
# 3rd-smallest value of the flattened matrix, together with its flat index
val, ind = torch.kthvalue(c.view(-1), 3)
val, ind
Out[251]:
In [256]:
a = {'a':1, 'b':2}
In [258]:
# Shallow copy via comprehension — rebinds `c` (previously a tensor);
# equivalent to a.copy() / dict(a).
c = {k:v for k,v in a.items()}
In [259]:
c
Out[259]:
In [254]:
# NOTE(review): out-of-order execution — In [254] ran *before* In [256]/[258],
# while `c` was still a tensor; this line fails once `c` is a dict.
c.contiguous().view(-1)
Out[254]:
In [148]:
# load dataset
# Dataset is the project-local wrapper imported from utils; the config dict
# selects MNIST stored under ~/nta/datasets.
dataset = Dataset(dict(dataset_name="MNIST", data_dir="~/nta/datasets"))
In [236]:
# load model
# NOTE(review): mid-notebook import — consider moving to the top import cell.
from networks import MLPHeb
# MLPHeb is a project-local MLP with Hebbian bookkeeping; config enables
# k-winner activations on CPU.
model = MLPHeb(dict(device='cpu', kwinners=True))
loss_func = nn.CrossEntropyLoss()
# summary(model, (3, 32, 32))
In [222]:
# get one batch
# NOTE(review): `input` shadows the Python builtin of the same name.
input, target = next(iter(dataset.train_loader))
input.shape, target.shape
Out[222]:
In [240]:
# run one forward pass and compute the loss
# Call the module itself rather than model.forward() so that registered
# forward/backward hooks (and other nn.Module call machinery) execute.
output = model(input)
loss = loss_func(output, target)
loss
Out[240]:
In [224]:
len(model.correlations)  # number of correlation matrices accumulated by the forward pass — presumably one per adjacent layer pair; confirm in networks.MLPHeb
Out[224]:
In [237]:
# Print the weight shape of every parameterized submodule
# (indentation reconstructed — it was lost in the notebook export).
for m in model.modules():
    if hasattr(m, 'weight'):
        print(m.weight.shape)
In [234]:
model.classifier  # inspect the classifier head module
Out[234]:
In [243]:
# Print the transposed shape of each stored correlation matrix
# (indentation reconstructed — it was lost in the notebook export).
# The loop variable `c` deliberately keeps its original name: a later
# cell inspects the leaked `c` after this loop finishes.
for c in model.correlations:
    print(c.t().shape)
In [179]:
c  # leaked loop variable from the shapes loop above — the last correlation matrix
Out[179]:
In [ ]:
32*32*64, 16*16*64, 8*8*64, 4*4*128, 2*2*256, 1*1*512  # presumably per-layer activation sizes (H*W*C) of a VGG-like conv stack — TODO confirm
In [141]:
# run a full epoch of SGD updates
# (indentation reconstructed — it was lost in the notebook export).
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# NOTE(review): this iterates the *test* loader while taking optimizer
# steps, i.e. it trains on the test set — dataset.train_loader was
# presumably intended; confirm before trusting any evaluation numbers.
for inputs, targets in dataset.test_loader:
    optimizer.zero_grad()  # clear gradients accumulated from the previous batch
    outputs = model(inputs)
    loss = loss_func(outputs, targets)
    loss.backward()
    optimizer.step()
In [78]:
l=kwinners[0]  # first k-winner layer — NOTE(review): `kwinners` comes from a missing/deleted cell (hidden kernel state)
l.duty_cycle.view(-1)  # flattened duty-cycle buffer — presumably per-unit activation frequency; confirm in nupic.torch KWinners docs
Out[78]:
In [84]:
# Outer products of consecutive layers' duty cycles: one matrix per
# adjacent pair, so len(correlations) == len(kwinners) - 1.
# NOTE(review): `kwinners` is not defined in any visible cell (hidden
# kernel state); this cell fails on Restart & Run All.
# (Indentation reconstructed — it was lost in the notebook export.)
correlations = []
prev = None
for layer in kwinners:
    duty = layer.duty_cycle.view(-1)
    if prev is not None:
        # torch.ger is deprecated; torch.outer is the modern equivalent
        correlations.append(torch.outer(prev, duty))
    prev = duty
In [87]:
len(correlations)  # should be len(kwinners) - 1: one matrix per adjacent layer pair
Out[87]:
In [88]:
len(kwinners)
Out[88]:
In [90]:
correlations[0].shape  # outer product of the first two layers' duty cycles
Out[90]:
In [93]:
correlations[-1].shape
Out[93]:
In [100]:
torch.exp(correlations[0][:5,:5])  # exponentiated top-left 5x5 corner
Out[100]:
In [101]:
correlations[0][:5,:5]  # raw top-left 5x5 corner for comparison
Out[101]:
In [4]:
import numpy as np  # redundant re-import: these low execution counts are from an earlier kernel session
a = np.array([1,2,3,4,5])
In [7]:
a[[2,3]]  # fancy (integer-array) indexing — supported by numpy arrays
Out[7]:
In [8]:
a = [1,2,3,4,5]
# Plain Python lists do NOT support fancy indexing (unlike the numpy
# array above). The TypeError *is* the demonstration, so catch and
# display it instead of leaving a cell that crashes on Run All.
try:
    a[[2,3]]
except TypeError as exc:
    print("TypeError:", exc)
In [ ]: