Goal: Investigate how DSNN (dynamic sparse neural networks) fares on a toy problem (MNIST).
Compare the following models:
- large dense baseline (BaseModel + MLP)
- static sparse network (SparseModel)
- dynamic sparse network with Hebbian-based rewiring (DSNNMixedHeb + MLPHeb)
In [1]:
%load_ext autoreload
%autoreload 2
In [2]:
# general imports
import math
import os
import numpy as np
# torch imports
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as schedulers
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchsummary import summary
# nupic research imports
from nupic.research.frameworks.pytorch.image_transforms import RandomNoise
from nupic.torch.modules import KWinners
# local library
from networks_module.base_networks import *
from models_module.base_models import *
# local files
from utils import *
# plotting
import matplotlib.pyplot as plt
from matplotlib import rcParams
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
rcParams['figure.figsize'] = (12,6)
PATH = "~/nta/datasets"  # where the dataset will be downloaded and cached
In [3]:
from models_module.base_models import BaseModel, SparseModel, DSNNMixedHeb
from networks_module.hebbian_networks import MLP, MLPHeb
In [4]:
# load MNIST; test_noise=True also builds a noise-corrupted test set
config = dict(
    dataset_name="MNIST",
    data_dir=PATH,
    test_noise=True,
)
dataset = Dataset(config)
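With test_noise=True, the Dataset wrapper presumably builds a second test loader whose transform pipeline appends the RandomNoise transform imported above. A minimal sketch of that idea; the Dataset internals, the RandomNoise argument, and the batch size are all assumptions, not the wrapper's actual code:
In [ ]:
# hedged sketch of what test_noise=True is assumed to set up internally
noise_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # standard MNIST statistics
    RandomNoise(0.1),  # assumed signature: first arg = fraction of pixels corrupted
])
noisy_test = datasets.MNIST(os.path.expanduser(PATH), train=False,
                            download=True, transform=noise_transform)
noisy_loader = DataLoader(noisy_test, batch_size=128, shuffle=False)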
In [13]:
test_noise = True
use_kwinners = True
epochs = 15
on_perc = 0.1  # fraction of weights kept "on" in the sparse models
# large dense baseline: 3 hidden layers of 100 units each
config = dict(hidden_sizes=[100, 100, 100], use_kwinners=use_kwinners)
network = MLP(config=config)
config = dict(debug_weights=True)
model = BaseModel(network=network, config=config)
model.setup()
print("\nLarge Dense")
large_dense = model.train(dataset, epochs, test_noise=test_noise)
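Only the dense baseline is trained in this section. For the other two models in the comparison, a sketch of the presumed setup, mirroring the cell above; SparseModel and DSNNMixedHeb are imported in In [3], but whether they take on_perc in the model config (rather than elsewhere) is an assumption:
In [ ]:
# hedged sketch -- not executed here; config keys beyond those used above are assumptions
# static sparse: same architecture, fixed random sparse connectivity
network = MLP(config=dict(hidden_sizes=[100, 100, 100], use_kwinners=use_kwinners))
model = SparseModel(network=network, config=dict(debug_weights=True, on_perc=on_perc))
model.setup()
print("\nStatic Sparse")
static_sparse = model.train(dataset, epochs, test_noise=test_noise)

# DSNN: sparse connectivity rewired during training using Hebbian statistics
network = MLPHeb(config=dict(hidden_sizes=[100, 100, 100], use_kwinners=use_kwinners))
model = DSNNMixedHeb(network=network, config=dict(debug_weights=True, on_perc=on_perc))
model.setup()
print("\nDSNN")
dsnn = model.train(dataset, epochs, test_noise=test_noise)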
In [15]:
large_dense
Out[15]: (output not preserved; a dict mapping each tracked metric name to its per-epoch values, plotted below)
In [16]:
# plot every tracked metric in an h x 4 grid
results = large_dense
h, w = math.ceil(len(results) / 4), 4
combinations = []
for i in range(h):
    for j in range(w):
        combinations.append((i, j))
fig, axs = plt.subplots(h, w, squeeze=False, gridspec_kw={'hspace': 0.5, 'wspace': 0.5})
fig.set_size_inches(16, 16)
for (i, j), k in zip(combinations[:len(results)], sorted(results.keys())):
    axs[i, j].plot(range(len(results[k])), results[k])
    axs[i, j].set_title(k)
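Once all three models have been trained, their curves can be overlaid for a direct comparison. A sketch assuming static_sparse and dsnn from the cell sketched earlier; "val_acc" is a hypothetical key, inspect sorted(large_dense.keys()) for the metric names actually tracked:
In [ ]:
metric = "val_acc"  # hypothetical key; pick one present in all three result dicts
for name, res in [("large dense", large_dense),
                  ("static sparse", static_sparse),
                  ("DSNN", dsnn)]:
    plt.plot(res[metric], label=name)
plt.xlabel("epoch")
plt.ylabel(metric)
plt.legend();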