In [99]:
%load_ext autoreload
%autoreload 2

import matplotlib.pyplot as plt
from matplotlib import rcParams
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
rcParams['figure.figsize'] = (12,6)

import numpy as np
import pandas as pd
import seaborn as sns

import os

import torch
from torch import nn, optim
from torchvision import models
from nupic.torch.modules import KWinners2d, Flatten
from torchsummary import summary

from utils import Dataset


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [144]:
a = torch.tensor([1,2,3])
b = torch.tensor([4,5,6])

In [244]:
c = torch.ger(a,b)

In [137]:
a = torch.tensor([1,1,0])
b = torch.tensor([0,1,1])

In [138]:
a


Out[138]:
tensor([1, 1, 0])

In [139]:
b


Out[139]:
tensor([0, 1, 1])

In [140]:
a*b


Out[140]:
tensor([0, 1, 0])

In [141]:
a@b


Out[141]:
tensor(1)

In [142]:
torch.ger(b,a)


Out[142]:
tensor([[0, 0, 0],
        [1, 1, 0],
        [1, 1, 0]])

In [143]:
torch.sum(torch.ger(b,a), dim=1)


Out[143]:
tensor([0, 2, 2])

In [267]:
a = {'a': 2}

In [268]:
def test(dic):
    dic2 = dic.copy()
    dic2['a'] = 3
test(a)
a


Out[268]:
{'a': 2}

In [245]:
c


Out[245]:
tensor([[ 4,  5,  6],
        [ 8, 10, 12],
        [12, 15, 18]])

In [251]:
val, ind = torch.kthvalue(c.view(-1), 3)
val, ind


Out[251]:
(tensor(6), tensor(2))

In [256]:
a = {'a':1, 'b':2}

In [258]:
c = {k:v for k,v in a.items()}

In [259]:
c


Out[259]:
{'a': 1, 'b': 2}

In [254]:
c.contiguous().view(-1)


Out[254]:
tensor([ 4,  5,  6,  8, 10, 12, 12, 15, 18])

Setup a simple dataset and model


In [148]:
# load dataset
dataset = Dataset(dict(dataset_name="MNIST", data_dir="~/nta/datasets"))

In [236]:
# load model
from networks import MLPHeb
model = MLPHeb(dict(device='cpu', kwinners=True))
loss_func = nn.CrossEntropyLoss()
# summary(model, (3, 32, 32))

In [222]:
# get one batch
input, target = next(iter(dataset.train_loader))
input.shape, target.shape


Out[222]:
(torch.Size([128, 1, 28, 28]), torch.Size([128]))

In [240]:
# run one forward and loss calculation
output = model.forward(input)
loss = loss_func(output, target)
loss


first creation prev torch.Size([128, 784]) torch.Size([128, 1000])
first creation prev torch.Size([128, 1000]) torch.Size([128, 1000])
first creation prev torch.Size([128, 1000]) torch.Size([128, 1000])
first creation prev torch.Size([128, 1000]) torch.Size([128, 10])
Out[240]:
tensor(2.4300, grad_fn=<NllLossBackward>)

In [224]:
len(model.correlations)


Out[224]:
4

In [237]:
for m in model.modules():
    if hasattr(m, 'weight'):
        print(m.weight.shape)


torch.Size([1000, 784])
torch.Size([1000, 1000])
torch.Size([1000, 1000])
torch.Size([10, 1000])

In [234]:
model.classifier


Out[234]:
[Linear(in_features=784, out_features=1000, bias=False),
 ReLU(),
 Linear(in_features=1000, out_features=1000, bias=False),
 ReLU(),
 Linear(in_features=1000, out_features=1000, bias=False),
 ReLU(),
 Linear(in_features=1000, out_features=10, bias=False)]

In [243]:
for c in model.correlations:
    print(c.t().shape)


torch.Size([1000, 784])
torch.Size([1000, 1000])
torch.Size([1000, 1000])
torch.Size([10, 1000])

In [179]:
c


Out[179]:
tensor([[25.,  9.,  2.,  ..., 49., 28., 40.],
        [47., 11.,  2.,  ..., 72., 40., 64.],
        [40., 12.,  2.,  ..., 63., 28., 51.],
        ...,
        [ 6.,  0.,  0.,  ...,  9.,  2.,  7.],
        [47., 12.,  2.,  ..., 75., 38., 57.],
        [43.,  8.,  1.,  ..., 67., 36., 56.]])

In [ ]:
32*32*64, 16*16*64, 8*8*64, 4*4*128, 2*2*256, 1*1*512

In [141]:
# run a full epoch
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for inputs, targets in dataset.test_loader:
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = loss_func(outputs, targets)
    loss.backward()
    optimizer.step()


---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-141-7d8689d6ac90> in <module>
      3 for inputs, targets in dataset.test_loader:
      4     optimizer.zero_grad()
----> 5     outputs = model(inputs)
      6     loss = loss_func(outputs, targets)
      7     loss.backward()

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

~/nta/nupic.research/projects/dynamic_sparse/networks.py in forward(self, x)
    264                         self.correlations.append(outer_product)
    265                     else:
--> 266                         self.correlations[idx-1] += outer_product
    267                 prev_act = curr_act
    268             idx += 1

RuntimeError: The size of tensor a (2048) must match the size of tensor b (16384) at non-singleton dimension 1

Monitor duty cycle


In [78]:
l=kwinners[0]
l.duty_cycle.view(-1)


Out[78]:
tensor([0.2701, 0.3506, 0.3479, 0.3373, 0.3068, 0.3285, 0.2560, 0.2628, 0.3072,
        0.3014, 0.3104, 0.3046, 0.3292, 0.3081, 0.3436, 0.2817, 0.2963, 0.2982,
        0.2639, 0.2874, 0.3389, 0.2479, 0.2524, 0.2583, 0.2534, 0.3026, 0.3088,
        0.3394, 0.3166, 0.3331, 0.3141, 0.2862, 0.3031, 0.3051, 0.3456, 0.3524,
        0.3028, 0.3090, 0.3048, 0.2549, 0.3194, 0.3124, 0.2668, 0.2460, 0.2610,
        0.3067, 0.3237, 0.3108, 0.3068, 0.2518, 0.3028, 0.3056, 0.3132, 0.3056,
        0.3084, 0.3048, 0.3287, 0.3392, 0.2719, 0.2367, 0.2422, 0.2730, 0.3494,
        0.2920])

In [84]:
correlations = []
prev = None
for l in kwinners:
    if prev is None:
        prev = l.duty_cycle.view(-1)
    else:
        nextt = l.duty_cycle.view(-1)
        correlations.append(torch.ger(prev, nextt))
        prev = nextt

In [87]:
len(correlations)


Out[87]:
4

In [88]:
len(kwinners)


Out[88]:
5

In [90]:
correlations[0].shape


Out[90]:
torch.Size([64, 64])

In [93]:
correlations[-1].shape


Out[93]:
torch.Size([256, 512])

In [100]:
torch.exp(correlations[0][:5,:5])


Out[100]:
tensor([[1.0908, 1.0807, 1.0834, 1.0854, 1.0831],
        [1.1194, 1.1060, 1.1095, 1.1122, 1.1092],
        [1.1184, 1.1051, 1.1087, 1.1113, 1.1083],
        [1.1146, 1.1018, 1.1052, 1.1078, 1.1048],
        [1.1037, 1.0921, 1.0952, 1.0976, 1.0949]])

In [101]:
correlations[0][:5,:5]


Out[101]:
tensor([[0.0869, 0.0776, 0.0801, 0.0819, 0.0798],
        [0.1128, 0.1007, 0.1039, 0.1064, 0.1036],
        [0.1119, 0.1000, 0.1031, 0.1056, 0.1028],
        [0.1085, 0.0969, 0.1000, 0.1024, 0.0997],
        [0.0987, 0.0881, 0.0909, 0.0931, 0.0907]])

In [4]:
import numpy as np
a = np.array([1,2,3,4,5])

In [7]:
a[[2,3]]


Out[7]:
array([3, 4])

In [8]:
a = [1,2,3,4,5]
a[[2,3]]


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-8-9478b8d269fb> in <module>
      1 a = [1,2,3,4,5]
----> 2 a[[2,3]]

TypeError: list indices must be integers or slices, not list

In [ ]: