In [15]:
%load_ext autoreload
%autoreload 2


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [16]:
base_exp_config = dict(
    device="cuda",
    # ----- dataset related ----
    dataset_name="PreprocessedGSC",
    data_dir=os.path.expanduser("~/nta/datasets/gsc"),
    train_batches_per_epoch=5121,
    # batch_size_train=(4, 16),
    batch_size_train=16,
    batch_size_test=20,  # sized to fit on the GPU
    # ----- network related ----
    network="GSCHeb",
    percent_on_k_winner=[0.095, 0.125, 0.067],
    k_inference_factor=1.5,
    boost_strength=[1.5, 1.5, 1.5],
    boost_strength_factor=[0.9, 0.9, 0.9],
    hidden_neurons_conv=[64, 64],
    hidden_neurons_fc=1500,
    bias=True,
    dropout=False,
    batch_norm=True,
    # ----- model related ----
    model="BaseModel",
    optim_alg="SGD",
    momentum=0,
    learning_rate=0.01,
    weight_decay=0.01,
    lr_scheduler="StepLR",
    lr_gamma=0.9,
    on_perc=[1, 1, 0.1, 1],
    hebbian_prune_perc=None,
    hebbian_grow=False,
    weight_prune_perc=0.3,
    pruning_early_stop=None,  # 2
    # additional validation
    test_noise=True,
    # debugging
    debug_weights=True,
    debug_sparse=True,
)
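The list-valued entries are per-layer settings: percent_on_k_winner and the
boost parameters line up with the three k-winner layers in the network printed
below, while on_perc presumably carries one density per weighted layer. As a
rough sketch (assuming on_perc maps to conv1, conv2, fc1, fc2 in order, with
the layer shapes from the summary further down), the implied number of active
weights per layer:

layer_weights = [64 * 1 * 5 * 5, 64 * 64 * 5 * 5, 1600 * 1500, 1500 * 12]
on_perc = [1, 1, 0.1, 1]
[int(w * p) for w, p in zip(layer_weights, on_perc)]
# [1600, 102400, 240000, 18000]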

In [17]:
from nupic.research.frameworks.dynamic_sparse.networks import GSCHeb

In [20]:
net = GSCHeb(config=base_exp_config)

In [21]:
print(net)


GSCHeb(
  (features): Sequential(
    (0): Conv2d(1, 64, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): KWinners2d(channels=64, n=0, percent_on=0.095, boost_strength=1.5, boost_strength_factor=0.9, k_inference_factor=1.5, duty_cycle_period=1000)
    (4): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1))
    (5): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (7): KWinners2d(channels=64, n=0, percent_on=0.125, boost_strength=1.5, boost_strength_factor=0.9, k_inference_factor=1.5, duty_cycle_period=1000)
    (8): Flatten()
  )
  (classifier): Sequential(
    (0): DSLinearBlock(
      (0): DSLinear(in_features=1600, out_features=1500, bias=True)
      (1): BatchNorm1d(1500, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      (2): KWinners(n=1500, percent_on=0.067, boost_strength=1.5, boost_strength_factor=0.9, k_inference_factor=1.5, duty_cycle_period=1000)
    )
    (1): DSLinearBlock(
      (0): DSLinear(in_features=1500, out_features=12, bias=True)
    )
  )
)
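The classifier's 1600 input features follow directly from the convolutional
stack; a quick check of the spatial arithmetic (assuming a 32x32 input, valid
5x5 convolutions, and 2x2 max pooling, per the modules above):

s = 32
for _ in range(2):
    s = (s - 5 + 1) // 2   # 5x5 valid conv shrinks by 4, then 2x2 pool halves
s, 64 * s * s              # (5, 1600)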

In [11]:
from nupic.research.frameworks.dynamic_sparse.networks import GSCHeb
from torchsummary import summary
import torch
import os

In [12]:
net = GSCHeb()

def debug(module, i, o):
    # forward hook: i is the tuple of inputs, o is the output tensor
    print(module.__class__.__name__)
    print(' ' * 5, 'in.shape', i[0].shape)
    print(' ' * 5, 'out.shape', o.shape)

for m in net.features.children():
    m.register_forward_hook(debug)

In [13]:
summary(net, input_size=(1,32,32))


before features  torch.Size([2, 1, 32, 32])
after features  torch.Size([2, 1600])
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-13-e4d1587fb389> in <module>
----> 1 summary(net, input_size=(1,32,32))

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torchsummary/torchsummary.py in summary(model, input_size, batch_size, device)
     70     # make a forward pass
     71     # print(x.shape)
---> 72     model(*x)
     73 
     74     # remove these hooks

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

~/nta/nupic.research/nupic/research/frameworks/dynamic_sparse/networks/hebbian.py in forward(self, x)
     95             x = self.features(x)
     96         print('after features ', x.shape)
---> 97         x = self.classifier(x)
     98         print('after classifier ', x.shape)
     99         return x

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

~/nta/nupic.research/nupic/research/frameworks/dynamic_sparse/networks/hebbian.py in forward(self, input_tensor)
     73 
     74     def forward(self, input_tensor):
---> 75         output_tensor = super().forward(input_tensor)
     76         return output_tensor
     77 

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
     90     def forward(self, input):
     91         for module in self._modules.values():
---> 92             input = module(input)
     93         return input
     94 

~/miniconda3/envs/numenta/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

~/nta/nupic.torch/nupic/torch/modules/k_winners.py in forward(self, x)
    191 
    192         if self.training:
--> 193             x = F.KWinners.apply(x, self.duty_cycle, self.k, self.boost_strength)
    194             self.update_duty_cycle(x)
    195         else:

~/nta/nupic.torch/nupic/torch/functions/k_winners.py in forward(ctx, x, duty_cycles, k, boost_strength)
     93             target_density = float(k) / x.size(1)
     94             boost_factors = torch.exp((target_density - duty_cycles) * boost_strength)
---> 95             boosted = x.detach() * boost_factors
     96         else:
     97             boosted = x.detach()

RuntimeError: The size of tensor a (1000) must match the size of tensor b (64) at non-singleton dimension 1
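The failure is a broadcast mismatch inside KWinners: the duty-cycle buffer has
64 entries while the activation has 1000 features at dimension 1. The
KWinners2d layers printed earlier show n=0, which suggests these buffers are
sized lazily and that here the linear layer's buffer was left at a stale
64-channel size. A minimal reproduction of the broadcast error itself,
independent of nupic.torch:

import torch

x = torch.rand(2, 1000)        # activations with 1000 features
duty_cycles = torch.zeros(64)  # buffer sized for 64 channels instead
try:
    x * duty_cycles
except RuntimeError as e:
    print(e)  # The size of tensor a (1000) must match the size of tensor b (64)...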

In [26]:
net.train()
for _ in range(1000):
    net(torch.rand(2,1,32,32))
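After this warm-up in training mode, each k-winner layer's duty cycle should be
populated; a quick sanity check (sketch, relying on the duty_cycle buffer
visible in the traceback above):

for m in net.modules():
    if hasattr(m, 'duty_cycle'):
        print(type(m).__name__, m.duty_cycle.mean().item())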

In [2]:
num_dense = sum([784*100, 100*100, 100*100, 100*10])
num_sparse = num_dense * .1
num_dense, num_sparse


Out[2]:
(99400, 9940.0)

In [3]:
# need to sum bias
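Including biases, the dense count for the 784-100-100-100-10 MLP works out as
follows (sketch):

layers = [784, 100, 100, 100, 10]
weights = sum(i * o for i, o in zip(layers[:-1], layers[1:]))  # 99,400
biases = sum(layers[1:])                                       # 310
weights + biases                                               # 99,710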

In [8]:
import sys
sys.path.append("../../")

from nupic.research.frameworks.dynamic_sparse.models import BaseModel
from nupic.research.frameworks.dynamic_sparse.networks import MLP
import torch
import numpy as np
# load a regular network
network = MLP()
# load a regular base model for training
model = BaseModel(network=network)
model.setup()


---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-8-60b65a5b0af0> in <module>
      3 
      4 from nupic.research.frameworks.dynamic_sparse.models import BaseModel
----> 5 from nupic.research.frameworks.dynamic_sparse.networks import MLP
      6 import torch
      7 import numpy as np

ImportError: cannot import name 'MLP' from 'nupic.research.frameworks.dynamic_sparse.networks' (/Users/lsouza/nta/nupic.research/nupic/research/frameworks/dynamic_sparse/networks/__init__.py)

In [9]:
total = 0
for m in network.classifier.modules():
    if isinstance(m, torch.nn.Linear):
        num_weights = np.prod(m.weight.shape)
        num_bias = np.prod(m.bias.shape)
        print(num_weights, num_bias)
        total = total + num_weights + num_bias
        
print("\n", total)
num_dense = total


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-9-820e54e9ae6a> in <module>
      1 total = 0
----> 2 for m in network.classifier.modules():
      3     if isinstance(m, torch.nn.Linear):
      4         num_weights = np.prod(m.weight.shape)
      5         num_bias = np.prod(m.bias.shape)

NameError: name 'network' is not defined

In [10]:
total = 0
for m in network.classifier.modules():
    if isinstance(m, torch.nn.Linear):
        num_weights = int(np.prod(m.weight.shape)*.2)
        num_bias = np.prod(m.bias.shape)
        print(num_weights, num_bias)
        total = total + num_weights + num_bias
        
print("\n", total)
num_sparse = total


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-10-bef70151b991> in <module>
      1 total = 0
----> 2 for m in network.classifier.modules():
      3     if isinstance(m, torch.nn.Linear):
      4         num_weights = int(np.prod(m.weight.shape)*.2)
      5         num_bias = np.prod(m.bias.shape)

NameError: name 'network' is not defined

In [11]:
total = 0
for m in network.classifier.modules():
    if isinstance(m, torch.nn.Linear):
        num_weights = int(np.prod(m.weight.shape)*.1)
        num_bias = np.prod(m.bias.shape)
        print(num_weights, num_bias)
        total = total + num_weights + num_bias
        
print("\n", total)
num_sparse = total


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-11-eed6bff76205> in <module>
      1 total = 0
----> 2 for m in network.classifier.modules():
      3     if isinstance(m, torch.nn.Linear):
      4         num_weights = int(np.prod(m.weight.shape)*.1)
      5         num_bias = np.prod(m.bias.shape)

NameError: name 'network' is not defined

In [12]:
total = 0
for m in network.classifier.modules():
    if isinstance(m, torch.nn.Linear):
        num_weights = int(np.prod(m.weight.shape)*.05)
        num_bias = np.prod(m.bias.shape)
        print(num_weights, num_bias)
        total = total + num_weights + num_bias
        
print("\n", total)
num_sparse = total


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-12-7bb0aab404d2> in <module>
      1 total = 0
----> 2 for m in network.classifier.modules():
      3     if isinstance(m, torch.nn.Linear):
      4         num_weights = int(np.prod(m.weight.shape)*.05)
      5         num_bias = np.prod(m.bias.shape)

NameError: name 'network' is not defined
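All of the NameErrors above trace back to the failed MLP import in In [8]:
network was never bound. The same per-density counts can be computed standalone
(sketch, assuming the 784-100-100-100-10 layer sizes used earlier, with dense
biases):

layers = [784, 100, 100, 100, 10]
for density in (0.2, 0.1, 0.05):
    total = sum(int(i * o * density) + o for i, o in zip(layers[:-1], layers[1:]))
    print(density, total)
# 0.2 20190
# 0.1 10250
# 0.05 5280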

In [13]:
hs = [12,16,23]
sum([784*hs[0], hs[0]*hs[1], hs[1]*hs[2], hs[2]*10]) + sum(hs)


Out[13]:
10249
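These hidden sizes were presumably chosen so that a fully dense MLP matches the
10% sparse budget once biases are counted:

num_sparse_with_bias = 99400 * 0.1 + (100 + 100 + 100 + 10)
num_sparse_with_bias  # 10250.0; the 10,249 above omits only the output bias of 10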

GSC network


In [1]:
from nupic.research.frameworks.dynamic_sparse.networks import GSCHeb, GSCHebSmall

In [2]:
network = GSCHeb()

In [3]:
from torchsummary import summary

In [4]:
summary(network, input_size=(1,32,32))


----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1           [-1, 64, 28, 28]           1,664
       BatchNorm2d-2           [-1, 64, 28, 28]               0
         MaxPool2d-3           [-1, 64, 14, 14]               0
        KWinners2d-4           [-1, 64, 14, 14]               0
            Conv2d-5           [-1, 64, 10, 10]         102,464
       BatchNorm2d-6           [-1, 64, 10, 10]               0
         MaxPool2d-7             [-1, 64, 5, 5]               0
        KWinners2d-8             [-1, 64, 5, 5]               0
           Flatten-9                 [-1, 1600]               0
           Linear-10                 [-1, 1000]       1,601,000
      BatchNorm1d-11                 [-1, 1000]               0
         KWinners-12                 [-1, 1000]               0
           Linear-13                   [-1, 12]          12,012
================================================================
Total params: 1,717,140
Trainable params: 1,717,140
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 1.11
Params size (MB): 6.55
Estimated Total Size (MB): 7.67
----------------------------------------------------------------

In [5]:
# Total params = 1,717,140
# 4% of that = 
1717140 * 0.04


Out[5]:
68685.6

In [6]:
64/25


Out[6]:
2.56

In [7]:
network = GSCHebSmall()
summary(network, input_size=(1,32,32))


----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1           [-1, 12, 28, 28]             312
       BatchNorm2d-2           [-1, 12, 28, 28]               0
         MaxPool2d-3           [-1, 12, 14, 14]               0
        KWinners2d-4           [-1, 12, 14, 14]               0
            Conv2d-5           [-1, 12, 10, 10]           3,612
       BatchNorm2d-6           [-1, 12, 10, 10]               0
         MaxPool2d-7             [-1, 12, 5, 5]               0
        KWinners2d-8             [-1, 12, 5, 5]               0
           Flatten-9                  [-1, 300]               0
           Linear-10                  [-1, 207]          62,307
      BatchNorm1d-11                  [-1, 207]               0
         KWinners-12                  [-1, 207]               0
           Linear-13                   [-1, 12]           2,496
================================================================
Total params: 68,727
Trainable params: 68,727
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.21
Params size (MB): 0.26
Estimated Total Size (MB): 0.48
----------------------------------------------------------------
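GSCHebSmall lands almost exactly on the 4% parameter budget computed above; a
quick check against the summary's per-layer counts:

small_total = 312 + 3612 + 62307 + 2496  # Conv2d-1, Conv2d-5, Linear-10, Linear-13
target = 1717140 * 0.04
small_total, target, small_total / 1717140  # (68727, 68685.6, ~0.04)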
