In [1]:
# Reload edited project modules automatically so changes to the
# nupic.research source are picked up without restarting the kernel.
%load_ext autoreload
%autoreload 2
In [41]:
import torch
import torch.nn.functional as F
import torch.nn as nn
In [3]:
# Randomly initialised weight matrices for a tiny 3 -> 4 -> 5 network.
w1 = torch.randn(3, 4)
w2 = torch.randn(4, 5)
In [4]:
# Random input vector for the 3-unit input layer, plus three extra random
# (1, 4) tensors.  NOTE(review): u0_sample1/2/3 are not referenced anywhere
# else in this notebook — candidates for deletion.
u0 = torch.randn(1, 3)
u0_sample1 = torch.randn(1, 4)
u0_sample2 = torch.randn(1, 4)
u0_sample3 = torch.randn(1, 4)
In [5]:
# Forward pass through the two linear layers with ReLU nonlinearities.
u1 = u0.matmul(w1)   # (1, 3) @ (3, 4) -> (1, 4) pre-activation
a1 = u1.relu()       # layer-1 activation
u2 = a1.matmul(w2)   # (1, 4) @ (4, 5) -> (1, 5) pre-activation
a2 = u2.relu()       # layer-2 activation
In [6]:
# Show the input and both pre-activation tensors (only the last expression
# of a cell is rendered, so this tuple is the displayed value).
u0, u1, u2
Out[6]:
In [7]:
# Binary firing masks: after ReLU, a unit "fired" iff its output is nonzero,
# so .bool() marks exactly the active units.
bin_a1 = a1.bool()
bin_a2 = a2.bool()
# Co-activation table between layer-1 and layer-2 units (outer product of
# the two flattened masks, as 0/1 integers).
# torch.ger is deprecated; torch.outer is its documented replacement.
# (The original cell also contained bare `a1, a2` and `bin_a1, bin_a2`
# expressions whose values were discarded — removed as no-ops.)
torch.outer(bin_a1.view(-1).int(), bin_a2.view(-1).int())
Out[7]:
In [8]:
# Display the boolean activation masks computed in the previous cell.
bin_a1, bin_a2
Out[8]:
In [9]:
# NOTE(review): this cell originally read `l0 @ w0`, but neither `l0` nor
# `w0` is defined anywhere in this notebook, so it raised NameError on a
# fresh Restart-&-Run-All.  The matching defined input/weight pair is
# u0 (1x3) and w1 (3x4); using those instead — confirm intent.
u0 @ w1
In [ ]:
# NOTE(review): range(0) is empty, so this loop body never executes.
# Looks like leftover scratch; the cell could be deleted.
for i in range(0):
    print('test')
In [10]:
# Quick sanity check of list concatenation with `+` (displays [1, 2]).
[1] + [2]
Out[10]:
In [ ]:
In [ ]:
In [ ]:
In [11]:
# Two 4x4 weight matrices with entries drawn uniformly from [-0.5, 0.5).
l1 = torch.rand((4, 4)) - 0.5
l2 = torch.rand((4, 4)) - 0.5
In [12]:
# Display both weight matrices.
l1, l2
Out[12]:
In [13]:
# ReLU via clamp: element-wise max(x, 0), identical to F.relu for floats.
l1.clamp(min=0)
Out[13]:
In [14]:
# Fresh random (1, 4) input vector, displayed immediately.
i = torch.randn(1, 4)
i
Out[14]:
In [15]:
# First hidden layer: (1, 4) input through the 4x4 matrix l1.
h1 = i.matmul(l1)
h1
Out[15]:
In [16]:
# Second layer: propagate h1 through l2.
h2 = h1.matmul(l2)
h2
Out[16]:
In [17]:
from nupic.research.frameworks.dynamic_sparse import networks
from torchsummary import summary
In [18]:
# Alternative, larger configuration kept for reference (presumably
# MNIST-sized: 784 inputs, 10 classes — confirm before reusing):
# net = networks.MLP()
# net = networks.MLPHeb(config=dict(
#     input_size=784,
#     num_classes=10,
#     hidden_sizes=[100, 100, 100],
#     use_kwinners=False,
# ))
In [38]:
# Build a small MLPHeb network: 3 inputs -> hidden layers [4, 5] -> 2 classes.
# use_kwinners=False presumably selects plain activations instead of
# k-winners — confirm against networks.MLPHeb's config handling.
net = networks.MLPHeb(config=dict(
    input_size=3,
    num_classes=2,
    hidden_sizes=[4, 5],
    use_kwinners=False,
))
In [39]:
# Print a layer-by-layer parameter/shape summary for a (1, 3) input.
summary(net, input_size=(1,3))
In [43]:
# Walk every submodule and report the weight shape of each Linear layer.
# NOTE: the loop variable `m` deliberately remains bound after the loop —
# a later cell reads it (hidden-state reliance in the original notebook).
for m in net.modules():
    if not isinstance(m, nn.Linear):
        continue
    print(m.weight.shape)
In [47]:
# NOTE(review): `m` is the loop variable leaked from the previous cell's
# `for m in net.modules()` loop (bound to the last module iterated).  This
# relies on hidden state and only works if that cell ran immediately
# before; prefer capturing the desired layer explicitly.
m2 = m.weight.data.T
In [55]:
# Outer product of the positive-entry mask of m2 with itself
# (pairwise "both weights positive" table as 0/1 integers).
# torch.ger is deprecated; torch.outer is its documented replacement.
# Hoist the mask so it is computed once instead of twice.
pos_mask = (m2 > 0).int().view(-1)
torch.outer(pos_mask, pos_mask)
Out[55]:
In [52]:
# Display m2 (the transposed weight matrix captured two cells above).
m2
Out[52]:
In [ ]: